@huggingface/tasks 0.12.20 → 0.12.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -4606,6 +4606,29 @@ depth = model.infer_image(raw_img) # HxW raw depth map in numpy
  `
  ];
  };
+ var depth_pro = (model) => {
+ const installSnippet = `# Download checkpoint
+ pip install huggingface-hub
+ huggingface-cli download --local-dir checkpoints ${model.id}`;
+ const inferenceSnippet = `import depth_pro
+
+ # Load model and preprocessing transform
+ model, transform = depth_pro.create_model_and_transforms()
+ model.eval()
+
+ # Load and preprocess an image.
+ image, _, f_px = depth_pro.load_rgb("example.png")
+ image = transform(image)
+
+ # Run inference.
+ prediction = model.infer(image, f_px=f_px)
+
+ # Results: 1. Depth in meters
+ depth = prediction["depth"]
+ # Results: 2. Focal length in pixels
+ focallength_px = prediction["focallength_px"]`;
+ return [installSnippet, inferenceSnippet];
+ };
  var diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
  var diffusers_default = (model) => [
  `from diffusers import DiffusionPipeline
@@ -5515,6 +5538,14 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: false,
  countDownloads: `path_extension:"pth"`
  },
+ "depth-pro": {
+ prettyLabel: "Depth Pro",
+ repoName: "Depth Pro",
+ repoUrl: "https://github.com/apple/ml-depth-pro",
+ countDownloads: `path_extension:"pt"`,
+ snippets: depth_pro,
+ filter: false
+ },
  diffree: {
  prettyLabel: "Diffree",
  repoName: "Diffree",
@@ -6008,6 +6039,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: true,
  countDownloads: `path:"models/default.zip"`
  },
+ "f5-tts": {
+ prettyLabel: "F5-TTS",
+ repoName: "F5-TTS",
+ repoUrl: "https://github.com/SWivid/F5-TTS",
+ filter: false,
+ countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
+ },
  tensorflowtts: {
  prettyLabel: "TensorFlowTTS",
  repoName: "TensorFlowTTS",
@@ -7146,6 +7184,7 @@ var SKUS = {
  };

  // src/local-apps.ts
+ var import_gguf = require("@huggingface/gguf");
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7161,9 +7200,15 @@ function isMarlinModel(model) {
  function isTransformersModel(model) {
  return model.tags.includes("transformers");
  }
+ function isTgiModel(model) {
+ return model.tags.includes("text-generation-inference");
+ }
  function isLlamaCppGgufModel(model) {
  return !!model.gguf?.context_length;
  }
+ function isMlxModel(model) {
+ return model.tags.includes("mlx");
+ }
  var snippetLlamacpp = (model, filepath) => {
  const command = (binary) => [
  "# Load and run the model:",
@@ -7199,6 +7244,30 @@ var snippetLlamacpp = (model, filepath) => {
  }
  ];
  };
+ var snippetNodeLlamaCppCli = (model, filepath) => {
+ return [
+ {
+ title: "Chat with the model",
+ content: [
+ `npx -y node-llama-cpp chat \\`,
+ ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+ ` --prompt 'Hi there!'`
+ ].join("\n")
+ },
+ {
+ title: "Estimate the model compatibility with your hardware",
+ content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`
+ }
+ ];
+ };
+ var snippetOllama = (model, filepath) => {
+ if (filepath) {
+ const quantLabel = (0, import_gguf.parseGGUFQuantLabel)(filepath);
+ const ollamatag = quantLabel ? `:${quantLabel}` : "";
+ return `ollama run hf.co/${model.id}${ollamatag}`;
+ }
+ return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+ };
  var snippetLocalAI = (model, filepath) => {
  const command = (binary) => ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
  return [
@@ -7260,6 +7329,34 @@ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
  }
  ];
  };
+ var snippetTgi = (model) => {
+ const runCommand = [
+ "# Call the server using curl:",
+ `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+ ` -H "Content-Type: application/json" \\`,
+ ` --data '{`,
+ ` "model": "${model.id}",`,
+ ` "messages": [`,
+ ` {"role": "user", "content": "What is the capital of France?"}`,
+ ` ]`,
+ ` }'`
+ ];
+ return [
+ {
+ title: "Use Docker images",
+ setup: [
+ "# Deploy with docker on Linux:",
+ `docker run --gpus all \\`,
+ ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+ ` -e HF_TOKEN="<secret>" \\`,
+ ` -p 8000:80 \\`,
+ ` ghcr.io/huggingface/text-generation-inference:latest \\`,
+ ` --model-id ${model.id}`
+ ].join("\n"),
+ content: [runCommand.join("\n")]
+ }
+ ];
+ };
  var LOCAL_APPS = {
  "llama.cpp": {
  prettyLabel: "llama.cpp",
@@ -7268,6 +7365,13 @@ var LOCAL_APPS = {
  displayOnModelPage: isLlamaCppGgufModel,
  snippet: snippetLlamacpp
  },
+ "node-llama-cpp": {
+ prettyLabel: "node-llama-cpp",
+ docsUrl: "https://node-llama-cpp.withcat.ai",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetNodeLlamaCppCli
+ },
  vllm: {
  prettyLabel: "vLLM",
  docsUrl: "https://docs.vllm.ai",
@@ -7275,11 +7379,18 @@ var LOCAL_APPS = {
  displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
  snippet: snippetVllm
  },
+ tgi: {
+ prettyLabel: "TGI",
+ docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+ mainTask: "text-generation",
+ displayOnModelPage: isTgiModel,
+ snippet: snippetTgi
+ },
  lmstudio: {
  prettyLabel: "LM Studio",
  docsUrl: "https://lmstudio.ai",
  mainTask: "text-generation",
- displayOnModelPage: isLlamaCppGgufModel,
+ displayOnModelPage: (model) => isLlamaCppGgufModel(model) || isMlxModel(model),
  deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`)
  },
  localai: {
@@ -7376,6 +7487,13 @@ var LOCAL_APPS = {
  mainTask: "text-to-image",
  displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
  deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`)
+ },
+ ollama: {
+ prettyLabel: "Ollama",
+ docsUrl: "https://ollama.com",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetOllama
  }
  };

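Note: in this CommonJS build the new gguf helper is invoked as `(0, import_gguf.parseGGUFQuantLabel)(filepath)`, while the ESM build below imports and calls it directly. The `(0, fn)` form is the bundler's import interop (esbuild emits this pattern): the comma operator detaches the function from the `import_gguf` namespace object so the call carries no `this` binding. A minimal TypeScript sketch of that semantics, using a hypothetical namespace object:

    // Hypothetical stand-in for the `import_gguf` namespace object.
    const ns = {
      whoAmI(this: unknown) {
        return this;
      },
    };

    ns.whoAmI(); // method call: `this` is `ns`
    (0, ns.whoAmI)(); // comma operator yields the bare function: `this` is undefined in strict mode
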
package/dist/index.js CHANGED
@@ -4568,6 +4568,29 @@ depth = model.infer_image(raw_img) # HxW raw depth map in numpy
  `
  ];
  };
+ var depth_pro = (model) => {
+ const installSnippet = `# Download checkpoint
+ pip install huggingface-hub
+ huggingface-cli download --local-dir checkpoints ${model.id}`;
+ const inferenceSnippet = `import depth_pro
+
+ # Load model and preprocessing transform
+ model, transform = depth_pro.create_model_and_transforms()
+ model.eval()
+
+ # Load and preprocess an image.
+ image, _, f_px = depth_pro.load_rgb("example.png")
+ image = transform(image)
+
+ # Run inference.
+ prediction = model.infer(image, f_px=f_px)
+
+ # Results: 1. Depth in meters
+ depth = prediction["depth"]
+ # Results: 2. Focal length in pixels
+ focallength_px = prediction["focallength_px"]`;
+ return [installSnippet, inferenceSnippet];
+ };
  var diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
  var diffusers_default = (model) => [
  `from diffusers import DiffusionPipeline
@@ -5477,6 +5500,14 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: false,
  countDownloads: `path_extension:"pth"`
  },
+ "depth-pro": {
+ prettyLabel: "Depth Pro",
+ repoName: "Depth Pro",
+ repoUrl: "https://github.com/apple/ml-depth-pro",
+ countDownloads: `path_extension:"pt"`,
+ snippets: depth_pro,
+ filter: false
+ },
  diffree: {
  prettyLabel: "Diffree",
  repoName: "Diffree",
@@ -5970,6 +6001,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: true,
  countDownloads: `path:"models/default.zip"`
  },
+ "f5-tts": {
+ prettyLabel: "F5-TTS",
+ repoName: "F5-TTS",
+ repoUrl: "https://github.com/SWivid/F5-TTS",
+ filter: false,
+ countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
+ },
  tensorflowtts: {
  prettyLabel: "TensorFlowTTS",
  repoName: "TensorFlowTTS",
@@ -7108,6 +7146,7 @@ var SKUS = {
  };

  // src/local-apps.ts
+ import { parseGGUFQuantLabel } from "@huggingface/gguf";
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7123,9 +7162,15 @@ function isMarlinModel(model) {
  function isTransformersModel(model) {
  return model.tags.includes("transformers");
  }
+ function isTgiModel(model) {
+ return model.tags.includes("text-generation-inference");
+ }
  function isLlamaCppGgufModel(model) {
  return !!model.gguf?.context_length;
  }
+ function isMlxModel(model) {
+ return model.tags.includes("mlx");
+ }
  var snippetLlamacpp = (model, filepath) => {
  const command = (binary) => [
  "# Load and run the model:",
@@ -7161,6 +7206,30 @@ var snippetLlamacpp = (model, filepath) => {
  }
  ];
  };
+ var snippetNodeLlamaCppCli = (model, filepath) => {
+ return [
+ {
+ title: "Chat with the model",
+ content: [
+ `npx -y node-llama-cpp chat \\`,
+ ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+ ` --prompt 'Hi there!'`
+ ].join("\n")
+ },
+ {
+ title: "Estimate the model compatibility with your hardware",
+ content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`
+ }
+ ];
+ };
+ var snippetOllama = (model, filepath) => {
+ if (filepath) {
+ const quantLabel = parseGGUFQuantLabel(filepath);
+ const ollamatag = quantLabel ? `:${quantLabel}` : "";
+ return `ollama run hf.co/${model.id}${ollamatag}`;
+ }
+ return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+ };
  var snippetLocalAI = (model, filepath) => {
  const command = (binary) => ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
  return [
@@ -7222,6 +7291,34 @@ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
  }
  ];
  };
+ var snippetTgi = (model) => {
+ const runCommand = [
+ "# Call the server using curl:",
+ `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+ ` -H "Content-Type: application/json" \\`,
+ ` --data '{`,
+ ` "model": "${model.id}",`,
+ ` "messages": [`,
+ ` {"role": "user", "content": "What is the capital of France?"}`,
+ ` ]`,
+ ` }'`
+ ];
+ return [
+ {
+ title: "Use Docker images",
+ setup: [
+ "# Deploy with docker on Linux:",
+ `docker run --gpus all \\`,
+ ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+ ` -e HF_TOKEN="<secret>" \\`,
+ ` -p 8000:80 \\`,
+ ` ghcr.io/huggingface/text-generation-inference:latest \\`,
+ ` --model-id ${model.id}`
+ ].join("\n"),
+ content: [runCommand.join("\n")]
+ }
+ ];
+ };
  var LOCAL_APPS = {
  "llama.cpp": {
  prettyLabel: "llama.cpp",
@@ -7230,6 +7327,13 @@ var LOCAL_APPS = {
  displayOnModelPage: isLlamaCppGgufModel,
  snippet: snippetLlamacpp
  },
+ "node-llama-cpp": {
+ prettyLabel: "node-llama-cpp",
+ docsUrl: "https://node-llama-cpp.withcat.ai",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetNodeLlamaCppCli
+ },
  vllm: {
  prettyLabel: "vLLM",
  docsUrl: "https://docs.vllm.ai",
@@ -7237,11 +7341,18 @@ var LOCAL_APPS = {
  displayOnModelPage: (model) => (isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model)) && (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
  snippet: snippetVllm
  },
+ tgi: {
+ prettyLabel: "TGI",
+ docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+ mainTask: "text-generation",
+ displayOnModelPage: isTgiModel,
+ snippet: snippetTgi
+ },
  lmstudio: {
  prettyLabel: "LM Studio",
  docsUrl: "https://lmstudio.ai",
  mainTask: "text-generation",
- displayOnModelPage: isLlamaCppGgufModel,
+ displayOnModelPage: (model) => isLlamaCppGgufModel(model) || isMlxModel(model),
  deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`)
  },
  localai: {
@@ -7338,6 +7449,13 @@ var LOCAL_APPS = {
  mainTask: "text-to-image",
  displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
  deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`)
+ },
+ ollama: {
+ prettyLabel: "Ollama",
+ docsUrl: "https://ollama.com",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetOllama
  }
  };

package/dist/src/local-apps.d.ts CHANGED
@@ -48,9 +48,11 @@ export type LocalApp = {
  /**
  * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
  * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
+ * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags, or removed if the repo does not contain multiple quant files.
  */
  snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
  });
+ declare function isTgiModel(model: ModelData): boolean;
  declare function isLlamaCppGgufModel(model: ModelData): boolean;
  /**
  * Add your new local app here.
@@ -71,6 +73,13 @@ export declare const LOCAL_APPS: {
  displayOnModelPage: typeof isLlamaCppGgufModel;
  snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
  };
+ "node-llama-cpp": {
+ prettyLabel: string;
+ docsUrl: string;
+ mainTask: "text-generation";
+ displayOnModelPage: typeof isLlamaCppGgufModel;
+ snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
+ };
  vllm: {
  prettyLabel: string;
  docsUrl: string;
@@ -78,11 +87,18 @@ export declare const LOCAL_APPS: {
  displayOnModelPage: (model: ModelData) => boolean;
  snippet: (model: ModelData) => LocalAppSnippet[];
  };
+ tgi: {
+ prettyLabel: string;
+ docsUrl: string;
+ mainTask: "text-generation";
+ displayOnModelPage: typeof isTgiModel;
+ snippet: (model: ModelData) => LocalAppSnippet[];
+ };
  lmstudio: {
  prettyLabel: string;
  docsUrl: string;
  mainTask: "text-generation";
- displayOnModelPage: typeof isLlamaCppGgufModel;
+ displayOnModelPage: (model: ModelData) => boolean;
  deeplink: (model: ModelData, filepath: string | undefined) => URL;
  };
  localai: {
@@ -166,6 +182,13 @@ export declare const LOCAL_APPS: {
  displayOnModelPage: (model: ModelData) => boolean;
  deeplink: (model: ModelData) => URL;
  };
+ ollama: {
+ prettyLabel: string;
+ docsUrl: string;
+ mainTask: "text-generation";
+ displayOnModelPage: typeof isLlamaCppGgufModel;
+ snippet: (model: ModelData, filepath?: string) => string;
+ };
  };
  export type LocalAppKey = keyof typeof LOCAL_APPS;
  export {};
package/dist/src/local-apps.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAsBF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAqGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA9GS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA0HlD,SAAS;yBA9DX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA4M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
+ {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAGhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;;OAIG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAqBF,iBAAS,UAAU,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAE7C;AAED,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AA+JD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApKS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;yBAqCzC,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCAkJzD,SAAS;yBAjGX,SAAS,KAAG,eAAe,EAAE;;;;;;;yBAsC9B,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBA7DzB,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yBATjD,SAAS,aAAa,MAAM,KAAG,MAAM;;CAsQ/B,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/dist/src/model-libraries-snippets.d.ts CHANGED
@@ -6,6 +6,7 @@ export declare const audioseal: (model: ModelData) => string[];
  export declare const bertopic: (model: ModelData) => string[];
  export declare const bm25s: (model: ModelData) => string[];
  export declare const depth_anything_v2: (model: ModelData) => string[];
+ export declare const depth_pro: (model: ModelData) => string[];
  export declare const diffusers: (model: ModelData) => string[];
  export declare const diffusionkit: (model: ModelData) => string[];
  export declare const cartesia_pytorch: (model: ModelData) => string[];
package/dist/src/model-libraries-snippets.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAe9C,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAkBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAkBlD,CAAC;AAaF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,iBAAiB,UAAW,SAAS,KAAG,MAAM,EA6C1D,CAAC;AAuCF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EAwCrD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAgBzD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EAmBrD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAgB/C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAMlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EASlD,CAAC;AAIF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAO/C,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAMhD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAS9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAgBzD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAOjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAIlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,aAAa,QAAO,MAAM,EAQtC,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAsBlD,CAAC;AAEF,eAAO,MAAM,uBAAuB,UAAW,SAAS,KAAG,MAAM,EAehE,CAAC;AAiBF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAyBF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAOtD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAI7C,CAAC;AAEF,eAAO,MAAM,OAAO,QAA6B,MAAM,EAQtD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAanC,CAAC;AAsCF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAehD,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,SAAS,KAAG,MAAM,EAmC3D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EA2B7C,CAAC;AAEF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAEtD,CAAC;AAEF,eAAO,MAAM,oBAAoB,UAAW,SAAS,KAAG,MAAM,EAQ7D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAU9C,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAIpD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAK/C,CAAC;AAkBF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAkBpD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EA4CrD,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAcvD,CAAC;AAiBF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAkB7C,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAMzD,CAAC;AAgBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAEjD,CAAC;AAEF,eAAO,MAAM,MAAM,QAA6B,MAAM,EAMrD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAInD,CAAC;AAEF,eAAO,MAAM,OAAO,QAAO,MAAM,EAYhC,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAOhD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAYjD,CAAC;AAEF,eAAO,MAAM,GAAG,UAAW,SAAS,KAAG,MAAM,EAK5C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAIlD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAQ7C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AA6BF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAUnD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAYnC,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC"}
+ {"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAe9C,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAkBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAkBlD,CAAC;AAaF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,iBAAiB,UAAW,SAAS,KAAG,MAAM,EA6C1D,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAwBlD,CAAC;AAuCF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EAwCrD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAgBzD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EAmBrD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAgB/C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAMlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EASlD,CAAC;AAIF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAO/C,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAMhD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAS9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAgBzD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAOjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAIlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,aAAa,QAAO,MAAM,EAQtC,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAsBlD,CAAC;AAEF,eAAO,MAAM,uBAAuB,UAAW,SAAS,KAAG,MAAM,EAehE,CAAC;AAiBF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAyBF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAOtD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAI7C,CAAC;AAEF,eAAO,MAAM,OAAO,QAA6B,MAAM,EAQtD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAanC,CAAC;AAsCF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAehD,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,SAAS,KAAG,MAAM,EAmC3D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EA2B7C,CAAC;AAEF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAEtD,CAAC;AAEF,eAAO,MAAM,oBAAoB,UAAW,SAAS,KAAG,MAAM,EAQ7D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAU9C,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAIpD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAK/C,CAAC;AAkBF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAkBpD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EA4CrD,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAcvD,CAAC;AAiBF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAkB7C,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAMzD,CAAC;AAgBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAEjD,CAAC;AAEF,eAAO,MAAM,MAAM,QAA6B,MAAM,EAMrD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAInD,CAAC;AAEF,eAAO,MAAM,OAAO,QAAO,MAAM,EAYhC,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAOhD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAYjD,CAAC;AAEF,eAAO,MAAM,GAAG,UAAW,SAAS,KAAG,MAAM,EAK5C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAIlD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAQ7C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AA6BF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAUnD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAYnC,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC"}
package/dist/src/model-libraries.d.ts CHANGED
@@ -161,6 +161,14 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  filter: false;
  countDownloads: string;
  };
+ "depth-pro": {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ countDownloads: string;
+ snippets: (model: ModelData) => string[];
+ filter: false;
+ };
  diffree: {
  prettyLabel: string;
  repoName: string;
@@ -652,6 +660,13 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  filter: true;
  countDownloads: string;
  };
+ "f5-tts": {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ filter: false;
+ countDownloads: string;
+ };
  tensorflowtts: {
  prettyLabel: string;
  repoName: string;
@@ -744,6 +759,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  };
  };
  export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
- export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
- export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+ export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+ export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
  //# sourceMappingURL=model-libraries.d.ts.map
package/dist/src/model-libraries.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAmrBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,uwCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,uwCAQ1B,CAAC"}
+ {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAksBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,gyCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,gyCAQ1B,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.12.20",
+ "version": "0.12.22",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
@@ -36,6 +36,9 @@
  "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.17/packages/quicktype-core/quicktype-core-18.0.17.tgz",
  "type-fest": "^3.13.1"
  },
+ "dependencies": {
+ "@huggingface/gguf": "^0.1.12"
+ },
  "scripts": {
  "lint": "eslint --quiet --fix --ext .cjs,.ts .",
  "lint:check": "eslint --ext .cjs,.ts .",
package/src/local-apps.ts CHANGED
@@ -1,5 +1,6 @@
  import type { ModelData } from "./model-data";
  import type { PipelineType } from "./pipelines";
+ import { parseGGUFQuantLabel } from "@huggingface/gguf";

  export interface LocalAppSnippet {
  /**
@@ -53,6 +54,7 @@ export type LocalApp = {
  /**
  * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
  * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
+ * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags, or removed if the repo does not contain multiple quant files.
  */
  snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
  }
@@ -77,11 +79,18 @@ function isMarlinModel(model: ModelData): boolean {
  function isTransformersModel(model: ModelData): boolean {
  return model.tags.includes("transformers");
  }
+ function isTgiModel(model: ModelData): boolean {
+ return model.tags.includes("text-generation-inference");
+ }

  function isLlamaCppGgufModel(model: ModelData) {
  return !!model.gguf?.context_length;
  }

+ function isMlxModel(model: ModelData) {
+ return model.tags.includes("mlx");
+ }
+
  const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
  const command = (binary: string) =>
  [
@@ -119,6 +128,32 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
  ];
  };

+ const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+ return [
+ {
+ title: "Chat with the model",
+ content: [
+ `npx -y node-llama-cpp chat \\`,
+ ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+ ` --prompt 'Hi there!'`,
+ ].join("\n"),
+ },
+ {
+ title: "Estimate the model compatibility with your hardware",
+ content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
+ },
+ ];
+ };
+
+ const snippetOllama = (model: ModelData, filepath?: string): string => {
+ if (filepath) {
+ const quantLabel = parseGGUFQuantLabel(filepath);
+ const ollamatag = quantLabel ? `:${quantLabel}` : "";
+ return `ollama run hf.co/${model.id}${ollamatag}`;
+ }
+ return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
+ };
+
  const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
  const command = (binary: string) =>
  ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
@@ -180,6 +215,34 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
  },
  ];
  };
+ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
+ const runCommand = [
+ "# Call the server using curl:",
+ `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+ ` -H "Content-Type: application/json" \\`,
+ ` --data '{`,
+ ` "model": "${model.id}",`,
+ ` "messages": [`,
+ ` {"role": "user", "content": "What is the capital of France?"}`,
+ ` ]`,
+ ` }'`,
+ ];
+ return [
+ {
+ title: "Use Docker images",
+ setup: [
+ "# Deploy with docker on Linux:",
+ `docker run --gpus all \\`,
+ ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+ ` -e HF_TOKEN="<secret>" \\`,
+ ` -p 8000:80 \\`,
+ ` ghcr.io/huggingface/text-generation-inference:latest \\`,
+ ` --model-id ${model.id}`,
+ ].join("\n"),
+ content: [runCommand.join("\n")],
+ },
+ ];
+ };

  /**
  * Add your new local app here.
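Note: the generated Docker command maps host port 8000 to TGI's port 80, and the server exposes an OpenAI-compatible `/v1/chat/completions` route, so the curl call above can equally be issued from code. A minimal TypeScript sketch, assuming a container started as in the snippet and a hypothetical model id:

    // Assumes TGI is serving on localhost:8000 (started with --model-id <modelId>).
    const modelId = "meta-llama/Llama-3.1-8B-Instruct"; // hypothetical

    const res = await fetch("http://localhost:8000/v1/chat/completions", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: modelId,
        messages: [{ role: "user", content: "What is the capital of France?" }],
      }),
    });
    const completion = await res.json();
    console.log(completion.choices[0].message.content);
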
@@ -200,6 +263,13 @@ export const LOCAL_APPS = {
  displayOnModelPage: isLlamaCppGgufModel,
  snippet: snippetLlamacpp,
  },
+ "node-llama-cpp": {
+ prettyLabel: "node-llama-cpp",
+ docsUrl: "https://node-llama-cpp.withcat.ai",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetNodeLlamaCppCli,
+ },
  vllm: {
  prettyLabel: "vLLM",
  docsUrl: "https://docs.vllm.ai",
@@ -214,11 +284,18 @@ export const LOCAL_APPS = {
  (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
  snippet: snippetVllm,
  },
+ tgi: {
+ prettyLabel: "TGI",
+ docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+ mainTask: "text-generation",
+ displayOnModelPage: isTgiModel,
+ snippet: snippetTgi,
+ },
  lmstudio: {
  prettyLabel: "LM Studio",
  docsUrl: "https://lmstudio.ai",
  mainTask: "text-generation",
- displayOnModelPage: isLlamaCppGgufModel,
+ displayOnModelPage: (model) => isLlamaCppGgufModel(model) || isMlxModel(model),
  deeplink: (model, filepath) =>
  new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`),
  },
@@ -323,6 +400,13 @@ export const LOCAL_APPS = {
  displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
  deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`),
  },
+ ollama: {
+ prettyLabel: "Ollama",
+ docsUrl: "https://ollama.com",
+ mainTask: "text-generation",
+ displayOnModelPage: isLlamaCppGgufModel,
+ snippet: snippetOllama,
+ },
  } satisfies Record<string, LocalApp>;

  export type LocalAppKey = keyof typeof LOCAL_APPS;
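Note: when no file is selected, `snippetOllama` leaves the `{{OLLAMA_TAG}}` placeholder in place; per the doc comment added above, a consumer replaces it with the repo's available quant tags, or drops it when there is effectively a single quant. A sketch of that substitution, assuming a hypothetical file listing (how the files are enumerated is up to the caller):

    import { parseGGUFQuantLabel } from "@huggingface/gguf";

    // Hypothetical sibling files of a GGUF repo.
    const siblings = ["model-Q4_K_M.gguf", "model-Q8_0.gguf"];
    const snippet = "ollama run hf.co/org/model{{OLLAMA_TAG}}";

    const tags = siblings.map((f) => parseGGUFQuantLabel(f)).filter((t): t is string => !!t);
    const commands =
      tags.length > 1
        ? tags.map((tag) => snippet.replace("{{OLLAMA_TAG}}", `:${tag}`))
        : [snippet.replace("{{OLLAMA_TAG}}", "")];
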
package/src/model-libraries-snippets.ts CHANGED
@@ -139,6 +139,32 @@ depth = model.infer_image(raw_img) # HxW raw depth map in numpy
  ];
  };

+ export const depth_pro = (model: ModelData): string[] => {
+ const installSnippet = `# Download checkpoint
+ pip install huggingface-hub
+ huggingface-cli download --local-dir checkpoints ${model.id}`;
+
+ const inferenceSnippet = `import depth_pro
+
+ # Load model and preprocessing transform
+ model, transform = depth_pro.create_model_and_transforms()
+ model.eval()
+
+ # Load and preprocess an image.
+ image, _, f_px = depth_pro.load_rgb("example.png")
+ image = transform(image)
+
+ # Run inference.
+ prediction = model.infer(image, f_px=f_px)
+
+ # Results: 1. Depth in meters
+ depth = prediction["depth"]
+ # Results: 2. Focal length in pixels
+ focallength_px = prediction["focallength_px"]`;
+
+ return [installSnippet, inferenceSnippet];
+ };
+
  const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";

  const diffusers_default = (model: ModelData) => [
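Note: library snippets like `depth_pro` are plain string builders over `ModelData`; only `model.id` is interpolated here. A quick sketch of what the new entry returns (illustrative id; the cast narrows the fixture to the one field the builder reads):

    const [install, inference] = depth_pro({ id: "apple/DepthPro" } as ModelData);
    // install:
    //   # Download checkpoint
    //   pip install huggingface-hub
    //   huggingface-cli download --local-dir checkpoints apple/DepthPro
    // inference: the Python example beginning `import depth_pro`
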
package/src/model-libraries.ts CHANGED
@@ -165,6 +165,14 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: false,
  countDownloads: `path_extension:"pth"`,
  },
+ "depth-pro": {
+ prettyLabel: "Depth Pro",
+ repoName: "Depth Pro",
+ repoUrl: "https://github.com/apple/ml-depth-pro",
+ countDownloads: `path_extension:"pt"`,
+ snippets: snippets.depth_pro,
+ filter: false,
+ },
  diffree: {
  prettyLabel: "Diffree",
  repoName: "Diffree",
@@ -658,6 +666,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: true,
  countDownloads: `path:"models/default.zip"`,
  },
+ "f5-tts": {
+ prettyLabel: "F5-TTS",
+ repoName: "F5-TTS",
+ repoUrl: "https://github.com/SWivid/F5-TTS",
+ filter: false,
+ countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`,
+ },
  tensorflowtts: {
  prettyLabel: "TensorFlowTTS",
  repoName: "TensorFlowTTS",