@huggingface/tasks 0.12.21 → 0.12.22
This diff shows the contents of publicly released versions of this package, as published to their respective public registries. It is provided for informational purposes only.
- package/dist/index.cjs +84 -0
- package/dist/index.js +84 -0
- package/dist/src/local-apps.d.ts +23 -0
- package/dist/src/local-apps.d.ts.map +1 -1
- package/dist/src/model-libraries.d.ts +9 -2
- package/dist/src/model-libraries.d.ts.map +1 -1
- package/package.json +4 -1
- package/src/local-apps.ts +80 -0
- package/src/model-libraries.ts +7 -0
package/dist/index.cjs
CHANGED

```diff
@@ -6039,6 +6039,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
     filter: true,
     countDownloads: `path:"models/default.zip"`
   },
+  "f5-tts": {
+    prettyLabel: "F5-TTS",
+    repoName: "F5-TTS",
+    repoUrl: "https://github.com/SWivid/F5-TTS",
+    filter: false,
+    countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
+  },
   tensorflowtts: {
     prettyLabel: "TensorFlowTTS",
     repoName: "TensorFlowTTS",
@@ -7177,6 +7184,7 @@ var SKUS = {
 };
 
 // src/local-apps.ts
+var import_gguf = require("@huggingface/gguf");
 function isAwqModel(model) {
   return model.config?.quantization_config?.quant_method === "awq";
 }
@@ -7192,6 +7200,9 @@ function isMarlinModel(model) {
 function isTransformersModel(model) {
   return model.tags.includes("transformers");
 }
+function isTgiModel(model) {
+  return model.tags.includes("text-generation-inference");
+}
 function isLlamaCppGgufModel(model) {
   return !!model.gguf?.context_length;
 }
@@ -7233,6 +7244,30 @@ var snippetLlamacpp = (model, filepath) => {
     }
   ];
 };
+var snippetNodeLlamaCppCli = (model, filepath) => {
+  return [
+    {
+      title: "Chat with the model",
+      content: [
+        `npx -y node-llama-cpp chat \\`,
+        ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+        ` --prompt 'Hi there!'`
+      ].join("\n")
+    },
+    {
+      title: "Estimate the model compatibility with your hardware",
+      content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`
+    }
+  ];
+};
+var snippetOllama = (model, filepath) => {
+  if (filepath) {
+    const quantLabel = (0, import_gguf.parseGGUFQuantLabel)(filepath);
+    const ollamatag = quantLabel ? `:${quantLabel}` : "";
+    return `ollama run hf.co/${model.id}${ollamatag}`;
+  }
+  return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+};
 var snippetLocalAI = (model, filepath) => {
   const command = (binary) => ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
   return [
@@ -7294,6 +7329,34 @@ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
     }
   ];
 };
+var snippetTgi = (model) => {
+  const runCommand = [
+    "# Call the server using curl:",
+    `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+    ` -H "Content-Type: application/json" \\`,
+    ` --data '{`,
+    ` "model": "${model.id}",`,
+    ` "messages": [`,
+    ` {"role": "user", "content": "What is the capital of France?"}`,
+    ` ]`,
+    ` }'`
+  ];
+  return [
+    {
+      title: "Use Docker images",
+      setup: [
+        "# Deploy with docker on Linux:",
+        `docker run --gpus all \\`,
+        ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+        ` -e HF_TOKEN="<secret>" \\`,
+        ` -p 8000:80 \\`,
+        ` ghcr.io/huggingface/text-generation-inference:latest \\`,
+        ` --model-id ${model.id}`
+      ].join("\n"),
+      content: [runCommand.join("\n")]
+    }
+  ];
+};
 var LOCAL_APPS = {
   "llama.cpp": {
     prettyLabel: "llama.cpp",
@@ -7302,6 +7365,13 @@ var LOCAL_APPS = {
     displayOnModelPage: isLlamaCppGgufModel,
     snippet: snippetLlamacpp
   },
+  "node-llama-cpp": {
+    prettyLabel: "node-llama-cpp",
+    docsUrl: "https://node-llama-cpp.withcat.ai",
+    mainTask: "text-generation",
+    displayOnModelPage: isLlamaCppGgufModel,
+    snippet: snippetNodeLlamaCppCli
+  },
   vllm: {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
@@ -7309,6 +7379,13 @@ var LOCAL_APPS = {
     displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
     snippet: snippetVllm
   },
+  tgi: {
+    prettyLabel: "TGI",
+    docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+    mainTask: "text-generation",
+    displayOnModelPage: isTgiModel,
+    snippet: snippetTgi
+  },
   lmstudio: {
     prettyLabel: "LM Studio",
     docsUrl: "https://lmstudio.ai",
@@ -7410,6 +7487,13 @@ var LOCAL_APPS = {
     mainTask: "text-to-image",
     displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
     deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`)
+  },
+  ollama: {
+    prettyLabel: "Ollama",
+    docsUrl: "https://ollama.com",
+    mainTask: "text-generation",
+    displayOnModelPage: isLlamaCppGgufModel,
+    snippet: snippetOllama
   }
 };
 
```
package/dist/index.js
CHANGED

```diff
@@ -6001,6 +6001,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
     filter: true,
     countDownloads: `path:"models/default.zip"`
   },
+  "f5-tts": {
+    prettyLabel: "F5-TTS",
+    repoName: "F5-TTS",
+    repoUrl: "https://github.com/SWivid/F5-TTS",
+    filter: false,
+    countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
+  },
   tensorflowtts: {
     prettyLabel: "TensorFlowTTS",
     repoName: "TensorFlowTTS",
@@ -7139,6 +7146,7 @@ var SKUS = {
 };
 
 // src/local-apps.ts
+import { parseGGUFQuantLabel } from "@huggingface/gguf";
 function isAwqModel(model) {
   return model.config?.quantization_config?.quant_method === "awq";
 }
@@ -7154,6 +7162,9 @@ function isMarlinModel(model) {
 function isTransformersModel(model) {
   return model.tags.includes("transformers");
 }
+function isTgiModel(model) {
+  return model.tags.includes("text-generation-inference");
+}
 function isLlamaCppGgufModel(model) {
   return !!model.gguf?.context_length;
 }
@@ -7195,6 +7206,30 @@ var snippetLlamacpp = (model, filepath) => {
     }
   ];
 };
+var snippetNodeLlamaCppCli = (model, filepath) => {
+  return [
+    {
+      title: "Chat with the model",
+      content: [
+        `npx -y node-llama-cpp chat \\`,
+        ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+        ` --prompt 'Hi there!'`
+      ].join("\n")
+    },
+    {
+      title: "Estimate the model compatibility with your hardware",
+      content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`
+    }
+  ];
+};
+var snippetOllama = (model, filepath) => {
+  if (filepath) {
+    const quantLabel = parseGGUFQuantLabel(filepath);
+    const ollamatag = quantLabel ? `:${quantLabel}` : "";
+    return `ollama run hf.co/${model.id}${ollamatag}`;
+  }
+  return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+};
 var snippetLocalAI = (model, filepath) => {
   const command = (binary) => ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
   return [
@@ -7256,6 +7291,34 @@ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
     }
   ];
 };
+var snippetTgi = (model) => {
+  const runCommand = [
+    "# Call the server using curl:",
+    `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+    ` -H "Content-Type: application/json" \\`,
+    ` --data '{`,
+    ` "model": "${model.id}",`,
+    ` "messages": [`,
+    ` {"role": "user", "content": "What is the capital of France?"}`,
+    ` ]`,
+    ` }'`
+  ];
+  return [
+    {
+      title: "Use Docker images",
+      setup: [
+        "# Deploy with docker on Linux:",
+        `docker run --gpus all \\`,
+        ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+        ` -e HF_TOKEN="<secret>" \\`,
+        ` -p 8000:80 \\`,
+        ` ghcr.io/huggingface/text-generation-inference:latest \\`,
+        ` --model-id ${model.id}`
+      ].join("\n"),
+      content: [runCommand.join("\n")]
+    }
+  ];
+};
 var LOCAL_APPS = {
   "llama.cpp": {
     prettyLabel: "llama.cpp",
@@ -7264,6 +7327,13 @@ var LOCAL_APPS = {
     displayOnModelPage: isLlamaCppGgufModel,
     snippet: snippetLlamacpp
   },
+  "node-llama-cpp": {
+    prettyLabel: "node-llama-cpp",
+    docsUrl: "https://node-llama-cpp.withcat.ai",
+    mainTask: "text-generation",
+    displayOnModelPage: isLlamaCppGgufModel,
+    snippet: snippetNodeLlamaCppCli
+  },
   vllm: {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
@@ -7271,6 +7341,13 @@ var LOCAL_APPS = {
     displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
     snippet: snippetVllm
   },
+  tgi: {
+    prettyLabel: "TGI",
+    docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+    mainTask: "text-generation",
+    displayOnModelPage: isTgiModel,
+    snippet: snippetTgi
+  },
   lmstudio: {
     prettyLabel: "LM Studio",
     docsUrl: "https://lmstudio.ai",
@@ -7372,6 +7449,13 @@ var LOCAL_APPS = {
     mainTask: "text-to-image",
     displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
     deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`)
+  },
+  ollama: {
+    prettyLabel: "Ollama",
+    docsUrl: "https://ollama.com",
+    mainTask: "text-generation",
+    displayOnModelPage: isLlamaCppGgufModel,
+    snippet: snippetOllama
   }
 };
 
```
package/dist/src/local-apps.d.ts
CHANGED

```diff
@@ -48,9 +48,11 @@ export type LocalApp = {
     /**
      * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
      * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
+     * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
      */
     snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
 });
+declare function isTgiModel(model: ModelData): boolean;
 declare function isLlamaCppGgufModel(model: ModelData): boolean;
 /**
  * Add your new local app here.
@@ -71,6 +73,13 @@ export declare const LOCAL_APPS: {
         displayOnModelPage: typeof isLlamaCppGgufModel;
         snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
     };
+    "node-llama-cpp": {
+        prettyLabel: string;
+        docsUrl: string;
+        mainTask: "text-generation";
+        displayOnModelPage: typeof isLlamaCppGgufModel;
+        snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
+    };
     vllm: {
         prettyLabel: string;
         docsUrl: string;
@@ -78,6 +87,13 @@ export declare const LOCAL_APPS: {
         displayOnModelPage: (model: ModelData) => boolean;
         snippet: (model: ModelData) => LocalAppSnippet[];
     };
+    tgi: {
+        prettyLabel: string;
+        docsUrl: string;
+        mainTask: "text-generation";
+        displayOnModelPage: typeof isTgiModel;
+        snippet: (model: ModelData) => LocalAppSnippet[];
+    };
     lmstudio: {
         prettyLabel: string;
         docsUrl: string;
@@ -166,6 +182,13 @@ export declare const LOCAL_APPS: {
         displayOnModelPage: (model: ModelData) => boolean;
         deeplink: (model: ModelData) => URL;
     };
+    ollama: {
+        prettyLabel: string;
+        docsUrl: string;
+        mainTask: "text-generation";
+        displayOnModelPage: typeof isLlamaCppGgufModel;
+        snippet: (model: ModelData, filepath?: string) => string;
+    };
 };
 export type LocalAppKey = keyof typeof LOCAL_APPS;
 export {};
```
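With `node-llama-cpp`, `tgi`, and `ollama` added to `LOCAL_APPS`, they also join the exported `LocalAppKey` union. A minimal consumer-side sketch, assuming `LOCAL_APPS` and `LocalAppKey` are re-exported from the package root like the rest of these declarations:

```ts
import { LOCAL_APPS, type LocalAppKey } from "@huggingface/tasks";

// These keys only type-check against LocalAppKey as of 0.12.22.
const newApps: LocalAppKey[] = ["node-llama-cpp", "tgi", "ollama"];
for (const key of newApps) {
	// prettyLabel exists on every LocalApp entry.
	console.log(LOCAL_APPS[key].prettyLabel); // node-llama-cpp, TGI, Ollama
}
```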
package/dist/src/local-apps.d.ts.map
CHANGED

```diff
@@ -1 +1 @@
-{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;
+{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAGhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;;OAIG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAqBF,iBAAS,UAAU,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAE7C;AAED,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AA+JD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApKS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;yBAqCzC,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCAkJzD,SAAS;yBAjGX,SAAS,KAAG,eAAe,EAAE;;;;;;;yBAsC9B,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBA7DzB,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yBATjD,SAAS,aAAa,MAAM,KAAG,MAAM;;CAsQ/B,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
```
package/dist/src/model-libraries.d.ts
CHANGED

```diff
@@ -660,6 +660,13 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
         filter: true;
         countDownloads: string;
     };
+    "f5-tts": {
+        prettyLabel: string;
+        repoName: string;
+        repoUrl: string;
+        filter: false;
+        countDownloads: string;
+    };
     tensorflowtts: {
         prettyLabel: string;
         repoName: string;
@@ -752,6 +759,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
     };
 };
 export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
-export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
-export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
 //# sourceMappingURL=model-libraries.d.ts.map
```
package/dist/src/model-libraries.d.ts.map
CHANGED

```diff
@@ -1 +1 @@
-{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B
+{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAksBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,gyCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,gyCAQ1B,CAAC"}
```
package/package.json
CHANGED

```diff
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.12.21",
+  "version": "0.12.22",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
@@ -36,6 +36,9 @@
     "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.17/packages/quicktype-core/quicktype-core-18.0.17.tgz",
     "type-fest": "^3.13.1"
   },
+  "dependencies": {
+    "@huggingface/gguf": "^0.1.12"
+  },
   "scripts": {
     "lint": "eslint --quiet --fix --ext .cjs,.ts .",
     "lint:check": "eslint --ext .cjs,.ts .",
```
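The only runtime dependency added in this release is `@huggingface/gguf`, which the new Ollama snippet uses to turn a GGUF filename into an Ollama tag. A minimal sketch of that helper in isolation; the filename is hypothetical, and the exact return value is an assumption inferred from how the snippet builds its tag:

```ts
import { parseGGUFQuantLabel } from "@huggingface/gguf";

// Extract the quantization label from a (hypothetical) GGUF filename.
const label = parseGGUFQuantLabel("Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"); // "Q4_K_M" (assumed)

// The Ollama snippet appends it as a tag, or nothing when no label is found.
const tag = label ? `:${label}` : "";
console.log(tag); // ":Q4_K_M"
```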
package/src/local-apps.ts
CHANGED

```diff
@@ -1,5 +1,6 @@
 import type { ModelData } from "./model-data";
 import type { PipelineType } from "./pipelines";
+import { parseGGUFQuantLabel } from "@huggingface/gguf";
 
 export interface LocalAppSnippet {
 	/**
@@ -53,6 +54,7 @@ export type LocalApp = {
 	/**
 	 * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
 	 * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
+	 * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
 	 */
 	snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
 }
@@ -77,6 +79,9 @@ function isMarlinModel(model: ModelData): boolean {
 function isTransformersModel(model: ModelData): boolean {
 	return model.tags.includes("transformers");
 }
+function isTgiModel(model: ModelData): boolean {
+	return model.tags.includes("text-generation-inference");
+}
 
 function isLlamaCppGgufModel(model: ModelData) {
 	return !!model.gguf?.context_length;
@@ -123,6 +128,32 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 	];
 };
 
+const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+	return [
+		{
+			title: "Chat with the model",
+			content: [
+				`npx -y node-llama-cpp chat \\`,
+				` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+				` --prompt 'Hi there!'`,
+			].join("\n"),
+		},
+		{
+			title: "Estimate the model compatibility with your hardware",
+			content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
+		},
+	];
+};
+
+const snippetOllama = (model: ModelData, filepath?: string): string => {
+	if (filepath) {
+		const quantLabel = parseGGUFQuantLabel(filepath);
+		const ollamatag = quantLabel ? `:${quantLabel}` : "";
+		return `ollama run hf.co/${model.id}${ollamatag}`;
+	}
+	return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+};
+
 const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
 	const command = (binary: string) =>
 		["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
@@ -184,6 +215,34 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
 		},
 	];
 };
+const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
+	const runCommand = [
+		"# Call the server using curl:",
+		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+		` -H "Content-Type: application/json" \\`,
+		` --data '{`,
+		` "model": "${model.id}",`,
+		` "messages": [`,
+		` {"role": "user", "content": "What is the capital of France?"}`,
+		` ]`,
+		` }'`,
+	];
+	return [
+		{
+			title: "Use Docker images",
+			setup: [
+				"# Deploy with docker on Linux:",
+				`docker run --gpus all \\`,
+				` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+				` -e HF_TOKEN="<secret>" \\`,
+				` -p 8000:80 \\`,
+				` ghcr.io/huggingface/text-generation-inference:latest \\`,
+				` --model-id ${model.id}`,
+			].join("\n"),
+			content: [runCommand.join("\n")],
+		},
+	];
+};
 
 /**
  * Add your new local app here.
@@ -204,6 +263,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLlamacpp,
 	},
+	"node-llama-cpp": {
+		prettyLabel: "node-llama-cpp",
+		docsUrl: "https://node-llama-cpp.withcat.ai",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaCppGgufModel,
+		snippet: snippetNodeLlamaCppCli,
+	},
 	vllm: {
 		prettyLabel: "vLLM",
 		docsUrl: "https://docs.vllm.ai",
@@ -218,6 +284,13 @@ export const LOCAL_APPS = {
 			(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
 		snippet: snippetVllm,
 	},
+	tgi: {
+		prettyLabel: "TGI",
+		docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+		mainTask: "text-generation",
+		displayOnModelPage: isTgiModel,
+		snippet: snippetTgi,
+	},
 	lmstudio: {
 		prettyLabel: "LM Studio",
 		docsUrl: "https://lmstudio.ai",
@@ -327,6 +400,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
 		deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`),
 	},
+	ollama: {
+		prettyLabel: "Ollama",
+		docsUrl: "https://ollama.com",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaCppGgufModel,
+		snippet: snippetOllama,
+	},
 } satisfies Record<string, LocalApp>;
 
 export type LocalAppKey = keyof typeof LOCAL_APPS;
```
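For reference, a sketch of the two strings the new `snippetOllama` can produce. The helper is module-private, so its branches are inlined here, and the repo id and file name are hypothetical:

```ts
import { parseGGUFQuantLabel } from "@huggingface/gguf";

const modelId = "bartowski/Llama-3.2-1B-Instruct-GGUF"; // hypothetical repo
const filepath = "Llama-3.2-1B-Instruct-Q4_K_M.gguf"; // hypothetical GGUF file

// With a file path, the quant label becomes an Ollama tag:
const quantLabel = parseGGUFQuantLabel(filepath); // "Q4_K_M" (assumed)
console.log(`ollama run hf.co/${modelId}${quantLabel ? `:${quantLabel}` : ""}`);
// -> ollama run hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M

// Without one, the {{OLLAMA_TAG}} placeholder is left for the UI to fill in
// (or strip), per the new doc comment on LocalApp's snippet:
console.log(`ollama run hf.co/${modelId}{{OLLAMA_TAG}}`);
```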
package/src/model-libraries.ts
CHANGED

```diff
@@ -666,6 +666,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
 		filter: true,
 		countDownloads: `path:"models/default.zip"`,
 	},
+	"f5-tts": {
+		prettyLabel: "F5-TTS",
+		repoName: "F5-TTS",
+		repoUrl: "https://github.com/SWivid/F5-TTS",
+		filter: false,
+		countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`,
+	},
 	tensorflowtts: {
 		prettyLabel: "TensorFlowTTS",
 		repoName: "TensorFlowTTS",
```
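The new `f5-tts` entry counts downloads by weight-file extension rather than a fixed file path. A short sketch of reading it back, assuming `MODEL_LIBRARIES_UI_ELEMENTS` is re-exported from the package root like the neighboring entries:

```ts
import { MODEL_LIBRARIES_UI_ELEMENTS } from "@huggingface/tasks";

const f5 = MODEL_LIBRARIES_UI_ELEMENTS["f5-tts"];
console.log(f5.repoUrl); // https://github.com/SWivid/F5-TTS
// Downloads are counted over any safetensors or pt file in a repo:
console.log(f5.countDownloads); // path_extension:"safetensors" OR path_extension:"pt"
```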