@huggingface/tasks 0.13.15 → 0.13.17
This diff shows the contents of publicly released versions of the package, exactly as they appear in their public registry; it is provided for informational purposes only.
- package/dist/commonjs/local-apps.js +9 -9
- package/dist/commonjs/local-apps.spec.js +2 -8
- package/dist/commonjs/model-data.d.ts +3 -0
- package/dist/commonjs/model-data.d.ts.map +1 -1
- package/dist/commonjs/model-libraries-snippets.d.ts +3 -1
- package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -1
- package/dist/commonjs/model-libraries-snippets.js +134 -22
- package/dist/commonjs/model-libraries.d.ts +12 -8
- package/dist/commonjs/model-libraries.d.ts.map +1 -1
- package/dist/commonjs/model-libraries.js +15 -8
- package/dist/commonjs/tasks/audio-to-audio/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio-to-audio/data.js +0 -4
- package/dist/commonjs/tasks/fill-mask/data.js +2 -2
- package/dist/commonjs/tasks/image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-classification/data.js +2 -3
- package/dist/commonjs/tasks/image-feature-extraction/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-feature-extraction/data.js +8 -3
- package/dist/commonjs/tasks/image-text-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-text-to-text/data.js +19 -11
- package/dist/commonjs/tasks/image-to-3d/data.js +4 -4
- package/dist/commonjs/tasks/image-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-image/data.js +12 -4
- package/dist/commonjs/tasks/index.js +1 -1
- package/dist/commonjs/tasks/keypoint-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/keypoint-detection/data.js +4 -0
- package/dist/commonjs/tasks/object-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/object-detection/data.js +6 -2
- package/dist/commonjs/tasks/sentence-similarity/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/sentence-similarity/data.js +5 -1
- package/dist/commonjs/tasks/text-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-generation/data.js +17 -13
- package/dist/commonjs/tasks/text-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-image/data.js +4 -0
- package/dist/commonjs/tasks/text-to-speech/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-speech/data.js +9 -9
- package/dist/commonjs/tasks/text-to-video/data.js +5 -5
- package/dist/commonjs/tasks/video-text-to-text/data.js +4 -4
- package/dist/commonjs/tasks/zero-shot-classification/data.js +2 -2
- package/dist/commonjs/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-image-classification/data.js +8 -4
- package/dist/esm/local-apps.js +9 -9
- package/dist/esm/local-apps.spec.js +2 -8
- package/dist/esm/model-data.d.ts +3 -0
- package/dist/esm/model-data.d.ts.map +1 -1
- package/dist/esm/model-libraries-snippets.d.ts +3 -1
- package/dist/esm/model-libraries-snippets.d.ts.map +1 -1
- package/dist/esm/model-libraries-snippets.js +129 -19
- package/dist/esm/model-libraries.d.ts +12 -8
- package/dist/esm/model-libraries.d.ts.map +1 -1
- package/dist/esm/model-libraries.js +15 -8
- package/dist/esm/tasks/audio-to-audio/data.d.ts.map +1 -1
- package/dist/esm/tasks/audio-to-audio/data.js +0 -4
- package/dist/esm/tasks/fill-mask/data.js +2 -2
- package/dist/esm/tasks/image-classification/data.d.ts.map +1 -1
- package/dist/esm/tasks/image-classification/data.js +2 -3
- package/dist/esm/tasks/image-feature-extraction/data.d.ts.map +1 -1
- package/dist/esm/tasks/image-feature-extraction/data.js +8 -3
- package/dist/esm/tasks/image-text-to-text/data.d.ts.map +1 -1
- package/dist/esm/tasks/image-text-to-text/data.js +19 -11
- package/dist/esm/tasks/image-to-3d/data.js +4 -4
- package/dist/esm/tasks/image-to-image/data.d.ts.map +1 -1
- package/dist/esm/tasks/image-to-image/data.js +12 -4
- package/dist/esm/tasks/index.js +1 -1
- package/dist/esm/tasks/keypoint-detection/data.d.ts.map +1 -1
- package/dist/esm/tasks/keypoint-detection/data.js +4 -0
- package/dist/esm/tasks/object-detection/data.d.ts.map +1 -1
- package/dist/esm/tasks/object-detection/data.js +6 -2
- package/dist/esm/tasks/sentence-similarity/data.d.ts.map +1 -1
- package/dist/esm/tasks/sentence-similarity/data.js +5 -1
- package/dist/esm/tasks/text-generation/data.d.ts.map +1 -1
- package/dist/esm/tasks/text-generation/data.js +17 -13
- package/dist/esm/tasks/text-to-image/data.d.ts.map +1 -1
- package/dist/esm/tasks/text-to-image/data.js +4 -0
- package/dist/esm/tasks/text-to-speech/data.d.ts.map +1 -1
- package/dist/esm/tasks/text-to-speech/data.js +9 -9
- package/dist/esm/tasks/text-to-video/data.js +5 -5
- package/dist/esm/tasks/video-text-to-text/data.js +4 -4
- package/dist/esm/tasks/zero-shot-classification/data.js +2 -2
- package/dist/esm/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
- package/dist/esm/tasks/zero-shot-image-classification/data.js +8 -4
- package/package.json +1 -1
- package/src/local-apps.spec.ts +2 -8
- package/src/local-apps.ts +9 -9
- package/src/model-data.ts +3 -0
- package/src/model-libraries-snippets.ts +141 -19
- package/src/model-libraries.ts +15 -8
- package/src/tasks/audio-to-audio/data.ts +0 -4
- package/src/tasks/fill-mask/data.ts +2 -2
- package/src/tasks/image-classification/data.ts +2 -3
- package/src/tasks/image-feature-extraction/data.ts +8 -3
- package/src/tasks/image-text-to-text/about.md +8 -3
- package/src/tasks/image-text-to-text/data.ts +19 -11
- package/src/tasks/image-to-3d/data.ts +4 -4
- package/src/tasks/image-to-image/data.ts +12 -5
- package/src/tasks/index.ts +1 -1
- package/src/tasks/keypoint-detection/data.ts +4 -0
- package/src/tasks/object-detection/data.ts +6 -2
- package/src/tasks/sentence-similarity/data.ts +5 -1
- package/src/tasks/text-generation/data.ts +17 -14
- package/src/tasks/text-to-image/data.ts +4 -0
- package/src/tasks/text-to-speech/data.ts +9 -10
- package/src/tasks/text-to-video/data.ts +5 -5
- package/src/tasks/video-text-to-text/data.ts +4 -4
- package/src/tasks/zero-shot-classification/data.ts +2 -2
- package/src/tasks/zero-shot-image-classification/data.ts +8 -4
--- package/dist/commonjs/local-apps.js (0.13.15)
+++ package/dist/commonjs/local-apps.js (0.13.17)
@@ -29,17 +29,17 @@ function isMlxModel(model) {
     return model.tags.includes("mlx");
 }
 const snippetLlamacpp = (model, filepath) => {
+    let tagName = "";
+    if (filepath) {
+        const quantLabel = (0, gguf_js_1.parseGGUFQuantLabel)(filepath);
+        tagName = quantLabel ? `:${quantLabel}` : "";
+    }
     const command = (binary) => {
-        const snippet = [
-            "# Load and run the model:",
-            `${binary} \\`,
-            ` --hf-repo "${model.id}" \\`,
-            ` --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\`,
-            ` -p "${model.tags.includes("conversational") ? "You are a helpful assistant" : "Once upon a time,"}"`,
-        ];
-        if (model.tags.includes("conversational")) {
+        const snippet = ["# Load and run the model:", `${binary} -hf ${model.id}${tagName}`];
+        if (!model.tags.includes("conversational")) {
+            // for non-conversational models, add a prompt
             snippet[snippet.length - 1] += " \\";
-            snippet.push(" --conversation");
+            snippet.push(' -p "Once upon a time,"');
         }
         return snippet.join("\n");
     };
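Net effect: the generated llama.cpp command drops the `--hf-repo`/`--hf-file` pair in favor of the newer `llama-cli -hf <repo>[:<quant>]` shorthand, deriving an optional quant tag from the GGUF filename. A minimal sketch of the tag derivation (repo id and filename are hypothetical, and this assumes `parseGGUFQuantLabel` is re-exported at the package root, as the compiled `require("./gguf.js")` suggests):

    import { parseGGUFQuantLabel } from "@huggingface/tasks";

    // Hypothetical GGUF repo and quantized weight file
    const modelId = "bartowski/Llama-3.2-3B-Instruct-GGUF";
    const filepath = "Llama-3.2-3B-Instruct-Q4_K_M.gguf";

    // Same derivation as snippetLlamacpp above
    const quantLabel = parseGGUFQuantLabel(filepath); // assumed to yield "Q4_K_M"
    const tagName = quantLabel ? `:${quantLabel}` : "";
    console.log(`llama-cli -hf ${modelId}${tagName}`);
    // -> llama-cli -hf bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M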
--- package/dist/commonjs/local-apps.spec.js (0.13.15)
+++ package/dist/commonjs/local-apps.spec.js (0.13.17)
@@ -12,11 +12,7 @@ const local_apps_js_1 = require("./local-apps.js");
     };
     const snippet = snippetFunc(model);
     (0, vitest_1.expect)(snippet[0].content).toEqual(`# Load and run the model:
-llama-cli \\
- --hf-repo "bartowski/Llama-3.2-3B-Instruct-GGUF" \\
- --hf-file {{GGUF_FILE}} \\
- -p "You are a helpful assistant" \\
- --conversation`);
+llama-cli -hf bartowski/Llama-3.2-3B-Instruct-GGUF`);
 });
 (0, vitest_1.it)("llama.cpp non-conversational", async () => {
     const { snippet: snippetFunc } = local_apps_js_1.LOCAL_APPS["llama.cpp"];
@@ -27,9 +23,7 @@ llama-cli \\
     };
     const snippet = snippetFunc(model);
     (0, vitest_1.expect)(snippet[0].content).toEqual(`# Load and run the model:
-llama-cli \\
- --hf-repo "mlabonne/gemma-2b-GGUF" \\
- --hf-file {{GGUF_FILE}} \\
+llama-cli -hf mlabonne/gemma-2b-GGUF \\
  -p "Once upon a time,"`);
 });
 (0, vitest_1.it)("vLLM conversational llm", async () => {
--- package/dist/commonjs/model-data.d.ts.map (0.13.15)
+++ package/dist/commonjs/model-data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"model-data.d.ts","sourceRoot":"","sources":["../../src/model-data.ts"], ...} (one-line sourcemap; base64-VLQ mappings omitted)
+{"version":3,"file":"model-data.d.ts","sourceRoot":"","sources":["../../src/model-data.ts"], ...} (one-line sourcemap regenerated for the updated declarations; mappings omitted)
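The underlying source change (package/src/model-data.ts, +3 −0) is not shown in this extract, but the new keras-hub dispatcher further below reads `model.config?.keras_hub?.tasks`, so the added config field plausibly has this shape — a sketch inferred from that read site, not confirmed by the diff:

    // Inferred addition to the `config` block of ModelData. Field name and
    // shape are an assumption based on the keras_hub snippet code, not the
    // verbatim source.
    interface InferredKerasHubConfig {
        keras_hub?: {
            tasks?: string[]; // e.g. ["CausalLM", "TextToImage"]
        };
    }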
--- package/dist/commonjs/model-libraries-snippets.d.ts (0.13.15)
+++ package/dist/commonjs/model-libraries-snippets.d.ts (0.13.17)
@@ -5,8 +5,10 @@ export declare const asteroid: (model: ModelData) => string[];
 export declare const audioseal: (model: ModelData) => string[];
 export declare const bertopic: (model: ModelData) => string[];
 export declare const bm25s: (model: ModelData) => string[];
+export declare const cxr_foundation: (model: ModelData) => string[];
 export declare const depth_anything_v2: (model: ModelData) => string[];
 export declare const depth_pro: (model: ModelData) => string[];
+export declare const derm_foundation: (model: ModelData) => string[];
 export declare const diffusers: (model: ModelData) => string[];
 export declare const diffusionkit: (model: ModelData) => string[];
 export declare const cartesia_pytorch: (model: ModelData) => string[];
@@ -20,7 +22,6 @@ export declare const flair: (model: ModelData) => string[];
 export declare const gliner: (model: ModelData) => string[];
 export declare const htrflow: (model: ModelData) => string[];
 export declare const keras: (model: ModelData) => string[];
-export declare const keras_nlp: (model: ModelData) => string[];
 export declare const keras_hub: (model: ModelData) => string[];
 export declare const llama_cpp_python: (model: ModelData) => string[];
 export declare const tf_keras: (model: ModelData) => string[];
@@ -47,6 +48,7 @@ export declare const spacy: (model: ModelData) => string[];
 export declare const span_marker: (model: ModelData) => string[];
 export declare const stanza: (model: ModelData) => string[];
 export declare const speechbrain: (model: ModelData) => string[];
+export declare const terratorch: (model: ModelData) => string[];
 export declare const transformers: (model: ModelData) => string[];
 export declare const transformersJS: (model: ModelData) => string[];
 export declare const peft: (model: ModelData) => string[];
--- package/dist/commonjs/model-libraries-snippets.d.ts.map (0.13.15)
+++ package/dist/commonjs/model-libraries-snippets.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"], ...} (one-line sourcemap; base64-VLQ mappings omitted)
+{"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"], ...} (one-line sourcemap regenerated for the updated declarations; mappings omitted)
--- package/dist/commonjs/model-libraries-snippets.js (0.13.15)
+++ package/dist/commonjs/model-libraries-snippets.js (0.13.17)
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.hezar = exports.threedtopia_xl = exports.whisperkit = exports.audiocraft = exports.anemoi = exports.pythae = exports.pxia = exports.nemo = exports.model2vec = exports.mlxim = exports.mlx = exports.birefnet = exports.ultralytics = exports.chattts = exports.voicecraft = exports.vfimamba = exports.sana = exports.sentis = exports.mlAgents = exports.stableBaselines3 = exports.fasttext = exports.peft = void 0;
+exports.terratorch = exports.speechbrain = exports.stanza = exports.span_marker = exports.spacy = exports.setfit = exports.sentenceTransformers = exports.sampleFactory = exports.sam2 = exports.fastai = exports.stable_audio_tools = exports.sklearn = exports.seed_story = exports.saelens = exports.timm = exports.tensorflowtts = exports.relik = exports.pyannote_audio = exports.pyannote_audio_pipeline = exports.paddlenlp = exports.open_clip = exports.mesh_anything = exports.mars5_tts = exports.mamba_ssm = exports.tf_keras = exports.llama_cpp_python = exports.keras_hub = exports.keras = exports.htrflow = exports.gliner = exports.flair = exports.fairseq = exports.espnet = exports.espnetASR = exports.espnetTTS = exports.edsnlp = exports.cartesia_mlx = exports.cartesia_pytorch = exports.diffusionkit = exports.diffusers = exports.derm_foundation = exports.depth_pro = exports.depth_anything_v2 = exports.cxr_foundation = exports.bm25s = exports.bertopic = exports.audioseal = exports.asteroid = exports.allennlp = exports.adapters = void 0;
+exports.hezar = exports.threedtopia_xl = exports.whisperkit = exports.audiocraft = exports.anemoi = exports.pythae = exports.pxia = exports.nemo = exports.model2vec = exports.mlxim = exports.mlx = exports.birefnet = exports.ultralytics = exports.chattts = exports.voicecraft = exports.vfimamba = exports.sana = exports.sentis = exports.mlAgents = exports.stableBaselines3 = exports.fasttext = exports.peft = exports.transformersJS = exports.transformers = void 0;
 const library_to_tasks_js_1 = require("./library-to-tasks.js");
 const inputs_js_1 = require("./snippets/inputs.js");
 const common_js_1 = require("./snippets/common.js");
@@ -86,6 +86,29 @@ const bm25s = (model) => [
 retriever = BM25HF.load_from_hub("${model.id}")`,
 ];
 exports.bm25s = bm25s;
+const cxr_foundation = (model) => [
+    `!git clone https://github.com/Google-Health/cxr-foundation.git
+import tensorflow as tf, sys, requests
+sys.path.append('cxr-foundation/python/')
+
+# Install dependencies
+major_version = tf.__version__.rsplit(".", 1)[0]
+!pip install tensorflow-text=={major_version} pypng && pip install --no-deps pydicom hcls_imaging_ml_toolkit retrying
+
+# Load image (Stillwaterising, CC0, via Wikimedia Commons)
+from PIL import Image
+from io import BytesIO
+image_url = "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png"
+response = requests.get(image_url, headers={'User-Agent': 'Demo'}, stream=True)
+response.raw.decode_content = True  # Ensure correct decoding
+img = Image.open(BytesIO(response.content)).convert('L')  # Convert to grayscale
+
+# Run inference
+from clientside.clients import make_hugging_face_client
+cxr_client = make_hugging_face_client('cxr_model')
+print(cxr_client.get_image_embeddings_from_images([img]))`,
+];
+exports.cxr_foundation = cxr_foundation;
 const depth_anything_v2 = (model) => {
     let encoder;
     let features;
@@ -156,6 +179,28 @@ focallength_px = prediction["focallength_px"]`;
     return [installSnippet, inferenceSnippet];
 };
 exports.depth_pro = depth_pro;
+const derm_foundation = (model) => [
+    `from huggingface_hub import from_pretrained_keras
+import tensorflow as tf, requests
+
+# Load and format input
+IMAGE_URL = "https://storage.googleapis.com/dx-scin-public-data/dataset/images/3445096909671059178.png"
+input_tensor = tf.train.Example(
+    features=tf.train.Features(
+        feature={
+            "image/encoded": tf.train.Feature(
+                bytes_list=tf.train.BytesList(value=[requests.get(IMAGE_URL, stream=True).content])
+            )
+        }
+    )
+).SerializeToString()
+
+# Load model and run inference
+loaded_model = from_pretrained_keras("google/derm-foundation")
+infer = loaded_model.signatures["serving_default"]
+print(infer(inputs=tf.constant([input_tensor])))`,
+];
+exports.derm_foundation = derm_foundation;
 const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
 const diffusers_default = (model) => [
     `from diffusers import DiffusionPipeline
@@ -376,32 +421,93 @@ model = keras.saving.load_model("hf://${model.id}")
 `,
 ];
 exports.keras = keras;
-const keras_nlp = (model) => [
-    `# Available backend options are: "jax", "torch", "tensorflow".
-import os
-os.environ["KERAS_BACKEND"] = "jax"
+const _keras_hub_causal_lm = (modelId) => `
+import keras_hub
 
-import keras_nlp
+# Load CausalLM model (optional: use half precision for inference)
+causal_lm = keras_hub.models.CausalLM.from_preset(${modelId}, dtype="bfloat16")
+causal_lm.compile(sampler="greedy") # (optional) specify a sampler
 
-tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
-backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
-`,
-];
-exports.keras_nlp = keras_nlp;
-const keras_hub = (model) => [
-    `# Available backend options are: "jax", "torch", "tensorflow".
-import os
-os.environ["KERAS_BACKEND"] = "jax"
+# Generate text
+causal_lm.generate("Keras: deep learning for", max_length=64)
+`;
+const _keras_hub_text_to_image = (modelId) => `
+import keras_hub
 
+# Load TextToImage model (optional: use half precision for inference)
+text_to_image = keras_hub.models.TextToImage.from_preset(${modelId}, dtype="bfloat16")
+
+# Generate images with a TextToImage model.
+text_to_image.generate("Astronaut in a jungle")
+`;
+const _keras_hub_text_classifier = (modelId) => `
 import keras_hub
 
-# Load a task-specific model (*replace CausalLM with your task*)
-model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")
+# Load TextClassifier model
+text_classifier = keras_hub.models.TextClassifier.from_preset(
+    ${modelId},
+    num_classes=2,
+)
+# Fine-tune
+text_classifier.fit(x=["Thilling adventure!", "Total snoozefest."], y=[1, 0])
+# Classify text
+text_classifier.predict(["Not my cup of tea."])
+`;
+const _keras_hub_image_classifier = (modelId) => `
+import keras_hub
+import keras
 
-# Possible tasks are CausalLM, TextToImage, etc.
-# full list here: https://keras.io/api/keras_hub/models/
-`,
-];
+# Load ImageClassifier model
+image_classifier = keras_hub.models.ImageClassifier.from_preset(
+    ${modelId},
+    num_classes=2,
+)
+# Fine-tune
+image_classifier.fit(
+    x=keras.random.randint((32, 64, 64, 3), 0, 256),
+    y=keras.random.randint((32, 1), 0, 2),
+)
+# Classify image
+image_classifier.predict(keras.random.randint((1, 64, 64, 3), 0, 256))
+`;
+const _keras_hub_tasks_with_example = {
+    CausalLM: _keras_hub_causal_lm,
+    TextToImage: _keras_hub_text_to_image,
+    TextClassifier: _keras_hub_text_classifier,
+    ImageClassifier: _keras_hub_image_classifier,
+};
+const _keras_hub_task_without_example = (task, modelId) => `
+import keras_hub
+
+# Create a ${task} model
+task = keras_hub.models.${task}.from_preset(${modelId})
+`;
+const _keras_hub_generic_backbone = (modelId) => `
+import keras_hub
+
+# Create a Backbone model unspecialized for any task
+backbone = keras_hub.models.Backbone.from_preset(${modelId})
+`;
+const keras_hub = (model) => {
+    const modelId = model.id;
+    const tasks = model.config?.keras_hub?.tasks ?? [];
+    const snippets = [];
+    // First, generate tasks with examples
+    for (const [task, snippet] of Object.entries(_keras_hub_tasks_with_example)) {
+        if (tasks.includes(task)) {
+            snippets.push(snippet(modelId));
+        }
+    }
+    // Then, add remaining tasks
+    for (const task in tasks) {
+        if (!Object.keys(_keras_hub_tasks_with_example).includes(task)) {
+            snippets.push(_keras_hub_task_without_example(task, modelId));
+        }
+    }
+    // Finally, add generic backbone snippet
+    snippets.push(_keras_hub_generic_backbone(modelId));
+    return snippets;
+};
 exports.keras_hub = keras_hub;
 const llama_cpp_python = (model) => {
     const snippets = [
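The keras-hub snippet is now task-aware: it emits a worked example for each advertised task it recognizes (CausalLM, TextToImage, TextClassifier, ImageClassifier), a generic `from_preset` stub for other tasks, and always ends with the plain Backbone snippet. A hypothetical invocation through the public registry (the model object is invented for illustration):

    import { MODEL_LIBRARIES_UI_ELEMENTS, type ModelData } from "@huggingface/tasks";

    // Hypothetical model whose card metadata advertises a KerasHub task
    const model = {
        id: "keras/gemma_2b_en",
        tags: [],
        config: { keras_hub: { tasks: ["CausalLM"] } },
    } as unknown as ModelData;

    const snippets = MODEL_LIBRARIES_UI_ELEMENTS["keras-hub"].snippets?.(model) ?? [];
    console.log(snippets[0]?.includes("keras_hub.models.CausalLM.from_preset")); // true
    console.log(snippets.at(-1)?.includes("keras_hub.models.Backbone.from_preset")); // true

Note that the helpers interpolate ${modelId} into the Python without surrounding quotes, so the rendered from_preset calls receive a bare identifier as written.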
@@ -817,6 +923,12 @@ model.${speechbrainMethod}("file.wav")`,
     ];
 };
 exports.speechbrain = speechbrain;
+const terratorch = (model) => [
+    `from terratorch.registry import BACKBONE_REGISTRY
+
+model = BACKBONE_REGISTRY.build("${model.id}")`,
+];
+exports.terratorch = terratorch;
 const transformers = (model) => {
     const info = model.transformersInfo;
     if (!info) {
--- package/dist/commonjs/model-libraries.d.ts (0.13.15)
+++ package/dist/commonjs/model-libraries.d.ts (0.13.17)
@@ -172,6 +172,7 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     prettyLabel: string;
     repoName: string;
     repoUrl: string;
+    snippets: (model: ModelData) => string[];
     filter: false;
     countDownloads: string;
 };
@@ -202,6 +203,7 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     prettyLabel: string;
     repoName: string;
     repoUrl: string;
+    snippets: (model: ModelData) => string[];
     filter: false;
     countDownloads: string;
 };
@@ -398,13 +400,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     snippets: (model: ModelData) => string[];
     countDownloads: string;
 };
-"keras-nlp": {
-    prettyLabel: string;
-    repoName: string;
-    repoUrl: string;
-    docsUrl: string;
-    snippets: (model: ModelData) => string[];
-};
 "keras-hub": {
     prettyLabel: string;
     repoName: string;
@@ -774,6 +769,15 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     repoName: string;
     repoUrl: string;
 };
+terratorch: {
+    prettyLabel: string;
+    repoName: string;
+    repoUrl: string;
+    docsUrl: string;
+    filter: false;
+    countDownloads: string;
+    snippets: (model: ModelData) => string[];
+};
 "tic-clip": {
     prettyLabel: string;
     repoName: string;
@@ -884,5 +888,5 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
 };
 export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
 export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
-export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-
+export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "terratorch" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
 //# sourceMappingURL=model-libraries.d.ts.map
--- package/dist/commonjs/model-libraries.d.ts.map (0.13.15)
+++ package/dist/commonjs/model-libraries.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"], ...} (one-line sourcemap, truncated in this extract; base64-VLQ mappings omitted)
+{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"], ...} (one-line sourcemap regenerated for the updated declarations; mappings omitted)
--- package/dist/commonjs/model-libraries.js (0.13.15)
+++ package/dist/commonjs/model-libraries.js (0.13.17)
@@ -158,6 +158,7 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
     prettyLabel: "CXR Foundation",
     repoName: "cxr-foundation",
    repoUrl: "https://github.com/google-health/cxr-foundation",
+    snippets: snippets.cxr_foundation,
     filter: false,
     countDownloads: `path:"precomputed_embeddings/embeddings.npz" OR path:"pax-elixr-b-text/saved_model.pb"`,
 },
@@ -188,6 +189,7 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
     prettyLabel: "Derm Foundation",
     repoName: "derm-foundation",
     repoUrl: "https://github.com/google-health/derm-foundation",
+    snippets: snippets.derm_foundation,
     filter: false,
     countDownloads: `path:"scin_dataset_precomputed_embeddings.npz" OR path:"saved_model.pb"`,
 },
@@ -386,13 +388,6 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
     snippets: snippets.tf_keras,
     countDownloads: `path:"saved_model.pb"`,
 },
-"keras-nlp": {
-    prettyLabel: "KerasNLP",
-    repoName: "KerasNLP",
-    repoUrl: "https://github.com/keras-team/keras-nlp",
-    docsUrl: "https://keras.io/keras_nlp/",
-    snippets: snippets.keras_nlp,
-},
 "keras-hub": {
     prettyLabel: "KerasHub",
     repoName: "KerasHub",
@@ -526,7 +521,10 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
     repoUrl: "https://github.com/mlfoundations/open_clip",
     snippets: snippets.open_clip,
     filter: true,
-    countDownloads: `
+    countDownloads: `path:"open_clip_model.safetensors"
+        OR path:"model.safetensors"
+        OR path:"open_clip_pytorch_model.bin"
+        OR path:"pytorch_model.bin"`,
 },
 paddlenlp: {
     prettyLabel: "paddlenlp",
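open_clip's download counting now ORs four common weight filenames; the removed one-line rule is truncated in this extract. The replacement is a single multi-line template literal whose embedded newlines travel with the query string. A quick check of the effective query (a sketch; the query grammar is assumed from the other countDownloads entries in this file):

    const countDownloads = `path:"open_clip_model.safetensors"
        OR path:"model.safetensors"
        OR path:"open_clip_pytorch_model.bin"
        OR path:"pytorch_model.bin"`;

    // Collapse layout whitespace to see the effective one-line query:
    console.log(countDownloads.replace(/\s+/g, " "));
    // -> path:"open_clip_model.safetensors" OR path:"model.safetensors" OR path:"open_clip_pytorch_model.bin" OR path:"pytorch_model.bin"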
@@ -762,6 +760,15 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
     repoName: "TabPFN",
     repoUrl: "https://github.com/PriorLabs/TabPFN",
 },
+terratorch: {
+    prettyLabel: "TerraTorch",
+    repoName: "TerraTorch",
+    repoUrl: "https://github.com/IBM/terratorch",
+    docsUrl: "https://ibm.github.io/terratorch/",
+    filter: false,
+    countDownloads: `path_extension:"pt"`,
+    snippets: snippets.terratorch,
+},
 "tic-clip": {
     prettyLabel: "TiC-CLIP",
     repoName: "TiC-CLIP",
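With this registry entry in place, the terratorch library resolves end to end. A quick check (sketch; assumes the same public exports used above, and the model id is hypothetical):

    import { MODEL_LIBRARIES_UI_ELEMENTS } from "@huggingface/tasks";

    const terratorch = MODEL_LIBRARIES_UI_ELEMENTS["terratorch"];
    console.log(terratorch.prettyLabel); // "TerraTorch"
    // Hypothetical model id, for illustration only
    console.log(terratorch.snippets?.({ id: "ibm-nasa-geospatial/Prithvi-100M", tags: [] } as any));
    // -> ['from terratorch.registry import BACKBONE_REGISTRY\n\nmodel = BACKBONE_REGISTRY.build("ibm-nasa-geospatial/Prithvi-100M")']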
--- package/dist/commonjs/tasks/audio-to-audio/data.d.ts.map (0.13.15)
+++ package/dist/commonjs/tasks/audio-to-audio/data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"], ...} (one-line sourcemap, truncated in this extract; mappings omitted)
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"], ...} (one-line sourcemap regenerated; mappings omitted)
--- package/dist/commonjs/tasks/fill-mask/data.js (0.13.15)
+++ package/dist/commonjs/tasks/fill-mask/data.js (0.13.17)
@@ -59,8 +59,8 @@ const taskData = {
     ],
     models: [
         {
-            description: "
-            id: "
+            description: "State-of-the-art masked language model.",
+            id: "answerdotai/ModernBERT-large",
         },
         {
             description: "A multilingual model trained on 100 languages.",
--- package/dist/commonjs/tasks/image-classification/data.d.ts.map (0.13.15)
+++ package/dist/commonjs/tasks/image-classification/data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/data.ts"], ...} (one-line sourcemap, truncated in this extract; mappings omitted)
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/data.ts"], ...} (one-line sourcemap regenerated; mappings omitted)
--- package/dist/commonjs/tasks/image-classification/data.js (0.13.15)
+++ package/dist/commonjs/tasks/image-classification/data.js (0.13.17)
@@ -74,9 +74,8 @@ const taskData = {
     ],
     spaces: [
         {
-
-
-            id: "nielsr/perceiver-image-classification",
+            description: "A leaderboard to evaluate different image classification models.",
+            id: "timm/leaderboard",
         },
     ],
     summary: "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.",
--- package/dist/commonjs/tasks/image-feature-extraction/data.d.ts.map (0.13.15)
+++ package/dist/commonjs/tasks/image-feature-extraction/data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-feature-extraction/data.ts"], ...} (one-line sourcemap, truncated in this extract; mappings omitted)
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-feature-extraction/data.ts"], ...} (one-line sourcemap regenerated; mappings omitted)
--- package/dist/commonjs/tasks/image-feature-extraction/data.js (0.13.15)
+++ package/dist/commonjs/tasks/image-feature-extraction/data.js (0.13.17)
@@ -42,15 +42,20 @@ const taskData = {
             id: "facebook/dino-vitb16",
         },
         {
-            description: "
-            id: "
+            description: "Cutting-edge image feature extraction model.",
+            id: "apple/aimv2-large-patch14-336-distilled",
         },
         {
             description: "Strong image feature extraction model that can be used on images and documents.",
             id: "OpenGVLab/InternViT-6B-448px-V1-2",
         },
     ],
-    spaces: [],
+    spaces: [
+        {
+            description: "A leaderboard to evaluate different image-feature-extraction models on classification performances",
+            id: "timm/leaderboard",
+        },
+    ],
     summary: "Image feature extraction is the task of extracting features learnt in a computer vision model.",
     widgetModels: [],
 };
--- package/dist/commonjs/tasks/image-text-to-text/data.d.ts.map (0.13.15)
+++ package/dist/commonjs/tasks/image-text-to-text/data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-text/data.ts"], ...} (one-line sourcemap, truncated in this extract; mappings omitted)
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-text/data.ts"], ...} (one-line sourcemap regenerated; mappings omitted)
--- package/dist/commonjs/tasks/image-text-to-text/data.js (0.13.15)
+++ package/dist/commonjs/tasks/image-text-to-text/data.js (0.13.17)
@@ -7,8 +7,8 @@ const taskData = {
             id: "liuhaotian/LLaVA-Instruct-150K",
         },
         {
-            description: "
-            id: "
+            description: "Collection of image-text pairs on scientific topics.",
+            id: "DAMO-NLP-SG/multimodal_textbook",
         },
         {
             description: "A collection of datasets made for model fine-tuning.",
@@ -42,11 +42,15 @@ const taskData = {
     metrics: [],
     models: [
         {
-            description: "
-            id: "
+            description: "Small and efficient yet powerful vision language model.",
+            id: "HuggingFaceTB/SmolVLM-Instruct",
         },
         {
-            description: "
+            description: "A screenshot understanding model used to control computers.",
+            id: "showlab/ShowUI-2B",
+        },
+        {
+            description: "Cutting-edge vision language model.",
             id: "allenai/Molmo-7B-D-0924",
         },
         {
@@ -58,8 +62,8 @@ const taskData = {
             id: "Qwen/Qwen2-VL-7B-Instruct",
         },
         {
-            description: "
-            id: "
+            description: "Image-text-to-text model with reasoning capabilities.",
+            id: "Qwen/QVQ-72B-Preview",
         },
         {
             description: "Strong image-text-to-text model focused on documents.",
@@ -83,14 +87,18 @@ const taskData = {
             description: "An image-text-to-text application focused on documents.",
             id: "stepfun-ai/GOT_official_online_demo",
         },
-        {
-            description: "An application to compare outputs of different vision language models.",
-            id: "merve/compare_VLMs",
-        },
         {
             description: "An application for chatting with an image-text-to-text model.",
             id: "GanymedeNil/Qwen2-VL-7B",
         },
+        {
+            description: "An application that parses screenshots into actions.",
+            id: "showlab/ShowUI",
+        },
+        {
+            description: "An application that detects gaze.",
+            id: "smoondream/gaze-demo",
+        },
     ],
     summary: "Image-text-to-text models take in an image and text prompt and output text. These models are also called vision-language models, or VLMs. The difference from image-to-text models is that these models take an additional text input, not restricting the model to certain use cases like image captioning, and may also be trained to accept a conversation as input.",
     widgetModels: ["meta-llama/Llama-3.2-11B-Vision-Instruct"],
--- package/dist/commonjs/tasks/image-to-3d/data.js (0.13.15)
+++ package/dist/commonjs/tasks/image-to-3d/data.js (0.13.17)
@@ -41,8 +41,8 @@ const taskData = {
             id: "hwjiang/Real3D",
         },
         {
-            description: "
-            id: "
+            description: "Consistent image-to-3d generation model.",
+            id: "stabilityai/stable-point-aware-3d",
         },
     ],
     spaces: [
@@ -55,8 +55,8 @@ const taskData = {
             id: "TencentARC/InstantMesh",
         },
         {
-            description: "Image-to-3D demo
-            id: "stabilityai/
+            description: "Image-to-3D demo.",
+            id: "stabilityai/stable-point-aware-3d",
         },
         {
             description: "Image-to-3D demo with mesh outputs.",
--- package/dist/commonjs/tasks/image-to-image/data.d.ts.map (0.13.15)
+++ package/dist/commonjs/tasks/image-to-image/data.d.ts.map (0.13.17)
@@ -1 +1 @@
-{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/data.ts"], ...} (one-line sourcemap; mappings omitted)
+{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/data.ts"], ...} (one-line sourcemap regenerated; mappings omitted)