@huggingface/tasks 0.12.18 → 0.12.20
This diff reflects the contents of publicly available package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/dist/index.cjs +9 -3
- package/dist/index.js +9 -3
- package/dist/src/local-apps.d.ts.map +1 -1
- package/dist/src/model-libraries.d.ts +8 -2
- package/dist/src/model-libraries.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/local-apps.ts +7 -6
- package/src/model-libraries.ts +6 -0
- package/src/tasks/image-text-to-text/data.ts +2 -2
package/dist/index.cjs
CHANGED
@@ -2220,8 +2220,8 @@ var taskData11 = {
   metrics: [],
   models: [
     {
-      description: "
-      id: "
+      description: "Powerful vision language model with great visual understanding and reasoning capabilities.",
+      id: "meta-llama/Llama-3.2-11B-Vision-Instruct"
     },
     {
       description: "Cutting-edge conversational vision language model that can take multiple image inputs.",
@@ -5866,6 +5866,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
     filter: false,
     countDownloads: `path:"model.safetensors"`
   },
+  reverb: {
+    prettyLabel: "Reverb",
+    repoName: "Reverb",
+    repoUrl: "https://github.com/revdotcom/reverb",
+    filter: false
+  },
   saelens: {
     prettyLabel: "SAELens",
     repoName: "SAELens",
@@ -7266,7 +7272,7 @@ var LOCAL_APPS = {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
     mainTask: "text-generation",
-    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
+    displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
     snippet: snippetVllm
   },
   lmstudio: {
package/dist/index.js
CHANGED
@@ -2182,8 +2182,8 @@ var taskData11 = {
   metrics: [],
   models: [
     {
-      description: "
-      id: "
+      description: "Powerful vision language model with great visual understanding and reasoning capabilities.",
+      id: "meta-llama/Llama-3.2-11B-Vision-Instruct"
     },
     {
       description: "Cutting-edge conversational vision language model that can take multiple image inputs.",
@@ -5828,6 +5828,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
     filter: false,
     countDownloads: `path:"model.safetensors"`
   },
+  reverb: {
+    prettyLabel: "Reverb",
+    repoName: "Reverb",
+    repoUrl: "https://github.com/revdotcom/reverb",
+    filter: false
+  },
   saelens: {
     prettyLabel: "SAELens",
     repoName: "SAELens",
@@ -7228,7 +7234,7 @@ var LOCAL_APPS = {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
     mainTask: "text-generation",
-    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
+    displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
     snippet: snippetVllm
   },
   lmstudio: {
package/dist/src/local-apps.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAsBF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAqGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA9GS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA0HlD,SAAS;yBA9DX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAsBF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAqGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA9GS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA0HlD,SAAS;yBA9DX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA4M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/dist/src/model-libraries.d.ts
CHANGED
@@ -510,6 +510,12 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     filter: false;
     countDownloads: string;
   };
+  reverb: {
+    prettyLabel: string;
+    repoName: string;
+    repoUrl: string;
+    filter: false;
+  };
   saelens: {
     prettyLabel: string;
     repoName: string;
@@ -738,6 +744,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
   };
 };
 export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
-
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
-
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
 //# sourceMappingURL=model-libraries.d.ts.map
package/dist/src/model-libraries.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B
+
{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAmrBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,uwCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,uwCAQ1B,CAAC"}
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.12.18",
+  "version": "0.12.20",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
package/src/local-apps.ts
CHANGED
@@ -205,12 +205,13 @@ export const LOCAL_APPS = {
     docsUrl: "https://docs.vllm.ai",
     mainTask: "text-generation",
     displayOnModelPage: (model: ModelData) =>
-      isAwqModel(model) ||
-      isGptqModel(model) ||
-      isAqlmModel(model) ||
-      isMarlinModel(model) ||
-      isLlamaCppGgufModel(model) ||
-      isTransformersModel(model),
+      (isAwqModel(model) ||
+        isGptqModel(model) ||
+        isAqlmModel(model) ||
+        isMarlinModel(model) ||
+        isLlamaCppGgufModel(model) ||
+        isTransformersModel(model)) &&
+      (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
     snippet: snippetVllm,
   },
   lmstudio: {
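Note on the `local-apps.ts` change above: the vLLM entry's `displayOnModelPage` predicate now also requires the model's `pipeline_tag` to be `"text-generation"` or `"image-text-to-text"`, on top of the existing weight-format/library checks. Below is a minimal sketch of the new gating logic for illustration only; only two of the six `is*Model` checks are included, and both are stand-ins with assumed implementations, since the real helpers are internal to `local-apps.ts` and not shown in this diff.

```ts
// Sketch only: `isLlamaCppGgufModel` and `isTransformersModel` are stand-ins for the
// package's internal helpers (assumed behaviour, not taken from this diff).
interface MiniModel {
	pipeline_tag?: string;
	tags: string[];
	library_name?: string;
}

const isLlamaCppGgufModel = (model: MiniModel): boolean => model.tags.includes("gguf");
const isTransformersModel = (model: MiniModel): boolean => model.library_name === "transformers";

// Mirrors the updated predicate: format/library check AND a pipeline_tag check.
const showVllmSnippet = (model: MiniModel): boolean =>
	(isLlamaCppGgufModel(model) || isTransformersModel(model)) &&
	(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text");

// A text-generation Transformers model still shows the vLLM snippet:
console.log(showVllmSnippet({ pipeline_tag: "text-generation", tags: [], library_name: "transformers" })); // true
// A Transformers model with a non-generation pipeline_tag no longer does:
console.log(showVllmSnippet({ pipeline_tag: "text-classification", tags: [], library_name: "transformers" })); // false
```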
package/src/model-libraries.ts
CHANGED
@@ -516,6 +516,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
     filter: false,
     countDownloads: `path:"model.safetensors"`,
   },
+  reverb: {
+    prettyLabel: "Reverb",
+    repoName: "Reverb",
+    repoUrl: "https://github.com/revdotcom/reverb",
+    filter: false,
+  },
   saelens: {
     prettyLabel: "SAELens",
     repoName: "SAELens",
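The new `reverb` entry above has the same shape as the neighbouring `MODEL_LIBRARIES_UI_ELEMENTS` entries, and the `model-libraries.d.ts` hunk earlier in this diff shows that `"reverb"` is now part of the `ModelLibraryKey` union and of `ALL_MODEL_LIBRARY_KEYS`. A hedged usage sketch, assuming these symbols are re-exported from the package root as the bundled `dist/index.*` files suggest:

```ts
import {
	MODEL_LIBRARIES_UI_ELEMENTS,
	ALL_MODEL_LIBRARY_KEYS,
	type ModelLibraryKey,
} from "@huggingface/tasks";

// "reverb" is now a valid ModelLibraryKey, so this assignment type-checks as of 0.12.20.
const key: ModelLibraryKey = "reverb";

const reverb = MODEL_LIBRARIES_UI_ELEMENTS["reverb"];
console.log(reverb.prettyLabel); // "Reverb"
console.log(reverb.repoUrl); // "https://github.com/revdotcom/reverb"
console.log(ALL_MODEL_LIBRARY_KEYS.includes(key)); // true
```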
package/src/tasks/image-text-to-text/data.ts
CHANGED
@@ -43,8 +43,8 @@ const taskData: TaskDataCustom = {
   metrics: [],
   models: [
     {
-      description: "
-      id: "
+      description: "Powerful vision language model with great visual understanding and reasoning capabilities.",
+      id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
     },
     {
       description: "Cutting-edge conversational vision language model that can take multiple image inputs.",