@huggingface/tasks 0.19.83 → 0.19.85

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/commonjs/gguf.d.ts.map +1 -1
  2. package/dist/commonjs/gguf.js +1 -1
  3. package/dist/commonjs/hardware.d.ts +36 -0
  4. package/dist/commonjs/hardware.d.ts.map +1 -1
  5. package/dist/commonjs/hardware.js +40 -4
  6. package/dist/commonjs/local-apps.d.ts +4 -2
  7. package/dist/commonjs/local-apps.d.ts.map +1 -1
  8. package/dist/commonjs/local-apps.js +23 -19
  9. package/dist/commonjs/local-apps.spec.js +7 -6
  10. package/dist/commonjs/model-libraries-snippets.d.ts +1 -0
  11. package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -1
  12. package/dist/commonjs/model-libraries-snippets.js +26 -3
  13. package/dist/commonjs/model-libraries.d.ts +22 -1
  14. package/dist/commonjs/model-libraries.d.ts.map +1 -1
  15. package/dist/commonjs/model-libraries.js +21 -0
  16. package/dist/esm/gguf.d.ts.map +1 -1
  17. package/dist/esm/gguf.js +1 -1
  18. package/dist/esm/hardware.d.ts +36 -0
  19. package/dist/esm/hardware.d.ts.map +1 -1
  20. package/dist/esm/hardware.js +40 -4
  21. package/dist/esm/local-apps.d.ts +4 -2
  22. package/dist/esm/local-apps.d.ts.map +1 -1
  23. package/dist/esm/local-apps.js +23 -19
  24. package/dist/esm/local-apps.spec.js +7 -6
  25. package/dist/esm/model-libraries-snippets.d.ts +1 -0
  26. package/dist/esm/model-libraries-snippets.d.ts.map +1 -1
  27. package/dist/esm/model-libraries-snippets.js +24 -2
  28. package/dist/esm/model-libraries.d.ts +22 -1
  29. package/dist/esm/model-libraries.d.ts.map +1 -1
  30. package/dist/esm/model-libraries.js +21 -0
  31. package/package.json +1 -1
  32. package/src/gguf.ts +3 -1
  33. package/src/hardware.ts +40 -4
  34. package/src/local-apps.spec.ts +7 -6
  35. package/src/local-apps.ts +27 -21
  36. package/src/model-libraries-snippets.ts +25 -2
  37. package/src/model-libraries.ts +21 -0
package/src/local-apps.ts CHANGED
@@ -91,6 +91,22 @@ function isLlamaCppGgufModel(model: ModelData) {
91
91
  return !!model.gguf?.context_length;
92
92
  }
93
93
 
94
+ function isVllmModel(model: ModelData): boolean {
95
+ return (
96
+ (isAwqModel(model) ||
97
+ isGptqModel(model) ||
98
+ isAqlmModel(model) ||
99
+ isMarlinModel(model) ||
100
+ isLlamaCppGgufModel(model) ||
101
+ isTransformersModel(model)) &&
102
+ (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text")
103
+ );
104
+ }
105
+
106
+ function isDockerModelRunnerModel(model: ModelData): boolean {
107
+ return isLlamaCppGgufModel(model) || isVllmModel(model);
108
+ }
109
+
94
110
  function isAmdRyzenModel(model: ModelData) {
95
111
  return model.tags.includes("ryzenai-hybrid") || model.tags.includes("ryzenai-npu");
96
112
  }
@@ -152,6 +168,10 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
152
168
  ].join("\n"),
153
169
  content: [serverCommand("./build/bin/llama-server"), cliCommand("./build/bin/llama-cli")],
154
170
  },
171
+ {
172
+ title: "Use Docker",
173
+ content: snippetDockerModelRunner(model, filepath),
174
+ },
155
175
  ];
156
176
  };
157
177
 
@@ -216,14 +236,6 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
216
236
  const serverCommand = `# Start the vLLM server:
217
237
  vllm serve "${model.id}"${mistralFlags}`;
218
238
 
219
- const dockerCommand = `docker run --gpus all \\
220
- -v ~/.cache/huggingface:/root/.cache/huggingface \\
221
- --env "HF_TOKEN=<secret>" \\
222
- -p 8000:8000 \\
223
- --ipc=host \\
224
- vllm/vllm-openai:latest \\
225
- --model "${model.id}"${mistralFlags}`;
226
-
227
239
  const runCommandInstruct = `# Call the server using curl (OpenAI-compatible API):
228
240
  curl -X POST "http://localhost:8000/v1/chat/completions" \\
229
241
  -H "Content-Type: application/json" \\
@@ -253,9 +265,8 @@ curl -X POST "http://localhost:8000/v1/completions" \\
253
265
  content: [serverCommand, runCommand],
254
266
  },
255
267
  {
256
- title: "Use Docker images",
257
- setup: dockerCommand,
258
- content: [runCommand],
268
+ title: "Use Docker",
269
+ content: snippetDockerModelRunner(model),
259
270
  },
260
271
  ];
261
272
  };
@@ -379,7 +390,9 @@ const snippetMlxLm = (model: ModelData): LocalAppSnippet[] => {
379
390
  };
380
391
 
381
392
  const snippetDockerModelRunner = (model: ModelData, filepath?: string): string => {
382
- return `docker model run hf.co/${model.id}${getQuantTag(filepath)}`;
393
+ // Only add quant tag for GGUF models, not safetensors
394
+ const quantTag = isLlamaCppGgufModel(model) ? getQuantTag(filepath) : "";
395
+ return `docker model run hf.co/${model.id}${quantTag}`;
383
396
  };
384
397
 
385
398
  const snippetLemonade = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
@@ -454,14 +467,7 @@ export const LOCAL_APPS = {
454
467
  prettyLabel: "vLLM",
455
468
  docsUrl: "https://docs.vllm.ai",
456
469
  mainTask: "text-generation",
457
- displayOnModelPage: (model: ModelData) =>
458
- (isAwqModel(model) ||
459
- isGptqModel(model) ||
460
- isAqlmModel(model) ||
461
- isMarlinModel(model) ||
462
- isLlamaCppGgufModel(model) ||
463
- isTransformersModel(model)) &&
464
- (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
470
+ displayOnModelPage: isVllmModel,
465
471
  snippet: snippetVllm,
466
472
  },
467
473
  sglang: {
@@ -604,7 +610,7 @@ export const LOCAL_APPS = {
604
610
  prettyLabel: "Docker Model Runner",
605
611
  docsUrl: "https://docs.docker.com/ai/model-runner/",
606
612
  mainTask: "text-generation",
607
- displayOnModelPage: isLlamaCppGgufModel,
613
+ displayOnModelPage: isDockerModelRunnerModel,
608
614
  snippet: snippetDockerModelRunner,
609
615
  },
610
616
  lemonade: {
@@ -1159,8 +1159,8 @@ for res in output:
1159
1159
  }
1160
1160
 
1161
1161
  if (model.tags.includes("document-parse")) {
1162
- const rawVersion = model.id.replace("PaddleOCR-VL-", "v");
1163
- const version = rawVersion === "PaddleOCR-VL" ? "v1" : rawVersion;
1162
+ const rawVersion = model.id.replace("PaddlePaddle/PaddleOCR-VL-", "v");
1163
+ const version = rawVersion === "PaddlePaddle/PaddleOCR-VL" ? "v1" : rawVersion;
1164
1164
  return [
1165
1165
  `# See https://www.paddleocr.ai/latest/version3.x/pipeline_usage/PaddleOCR-VL.html to installation
1166
1166
 
@@ -2277,6 +2277,29 @@ export const pythae = (model: ModelData): string[] => [
2277
2277
  model = AutoModel.load_from_hf_hub("${model.id}")`,
2278
2278
  ];
2279
2279
 
2280
+ export const qwen3_tts = (model: ModelData): string[] => [
2281
+ `# pip install qwen-tts
2282
+ import torch
2283
+ import soundfile as sf
2284
+ from qwen_tts import Qwen3TTSModel
2285
+
2286
+ model = Qwen3TTSModel.from_pretrained(
2287
+ "${model.id}",
2288
+ device_map="cuda:0",
2289
+ dtype=torch.bfloat16,
2290
+ attn_implementation="flash_attention_2",
2291
+ )
2292
+
2293
+ wavs, sr = model.generate_custom_voice(
2294
+ text="Your text here.",
2295
+ language="English",
2296
+ speaker="Ryan",
2297
+ instruct="Speak in a natural tone.",
2298
+ )
2299
+
2300
+ sf.write("output.wav", wavs[0], sr)`,
2301
+ ];
2302
+
2280
2303
  const musicgen = (model: ModelData): string[] => [
2281
2304
  `from audiocraft.models import MusicGen
2282
2305
 
@@ -629,6 +629,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
629
629
  filter: false,
630
630
  countDownloads: `path:"infu_flux_v1.0/sim_stage1/image_proj_model.bin" OR path:"infu_flux_v1.0/aes_stage2/image_proj_model.bin"`,
631
631
  },
632
+ intellifold: {
633
+ prettyLabel: "IntelliFold",
634
+ repoName: "IntelliFold",
635
+ repoUrl: "https://github.com/IntelliGen-AI/IntelliFold",
636
+ filter: false,
637
+ countDownloads: `path_extension:"pt"`,
638
+ },
632
639
  keras: {
633
640
  prettyLabel: "Keras",
634
641
  repoName: "Keras",
@@ -1010,6 +1017,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
1010
1017
  filter: false,
1011
1018
  countDownloads: `path_extension:"setup.py"`,
1012
1019
  },
1020
+ qwen3_tts: {
1021
+ prettyLabel: "Qwen3-TTS",
1022
+ repoName: "Qwen3-TTS",
1023
+ repoUrl: "https://github.com/QwenLM/Qwen3-TTS",
1024
+ snippets: snippets.qwen3_tts,
1025
+ filter: false,
1026
+ },
1013
1027
  recurrentgemma: {
1014
1028
  prettyLabel: "RecurrentGemma",
1015
1029
  repoName: "recurrentgemma",
@@ -1418,6 +1432,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
1418
1432
  countDownloads: `path_extension:"pkl"`,
1419
1433
  snippets: snippets.vfimamba,
1420
1434
  },
1435
+ vismatch: {
1436
+ prettyLabel: "VisMatch",
1437
+ repoName: "VisMatch",
1438
+ repoUrl: "https://github.com/gmberton/vismatch",
1439
+ filter: false,
1440
+ countDownloads: `path:"vismatch.yaml"`,
1441
+ },
1421
1442
  lvface: {
1422
1443
  prettyLabel: "LVFace",
1423
1444
  repoName: "LVFace",