@huggingface/tasks 0.12.27 → 0.12.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -24,6 +24,9 @@ __export(src_exports, {
  ALL_MODEL_LIBRARY_KEYS: () => ALL_MODEL_LIBRARY_KEYS,
  DATASET_LIBRARIES_UI_ELEMENTS: () => DATASET_LIBRARIES_UI_ELEMENTS,
  DEFAULT_MEMORY_OPTIONS: () => DEFAULT_MEMORY_OPTIONS,
+ GGMLQuantizationType: () => GGMLQuantizationType,
+ GGUF_QUANT_RE: () => GGUF_QUANT_RE,
+ GGUF_QUANT_RE_GLOBAL: () => GGUF_QUANT_RE_GLOBAL,
  LIBRARY_TASK_MAPPING: () => LIBRARY_TASK_MAPPING,
  LOCAL_APPS: () => LOCAL_APPS,
  MAPPING_DEFAULT_WIDGET: () => MAPPING_DEFAULT_WIDGET,
@@ -38,6 +41,7 @@ __export(src_exports, {
  SUBTASK_TYPES: () => SUBTASK_TYPES,
  TASKS_DATA: () => TASKS_DATA,
  TASKS_MODEL_LIBRARIES: () => TASKS_MODEL_LIBRARIES,
+ parseGGUFQuantLabel: () => parseGGUFQuantLabel,
  snippets: () => snippets_exports
  });
  module.exports = __toCommonJS(src_exports);
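These export hunks show the main functional addition in this diff: the GGUF quantization helpers, previously only reachable via @huggingface/gguf, are now part of this package's public surface (the implementation itself is inlined further down, in the src/gguf.ts hunk). A minimal consumer sketch against this CJS build, using only names visible in the diff:

    // Assumes @huggingface/tasks@0.12.29 is installed.
    const { parseGGUFQuantLabel, GGUF_QUANT_RE, GGMLQuantizationType } = require("@huggingface/tasks");

    console.log(parseGGUFQuantLabel("codellama-7b.Q8_0.gguf")); // "Q8_0"
    console.log(GGMLQuantizationType.Q8_0);                     // 8 (numeric enum value)
    console.log(GGUF_QUANT_RE.test("IQ2_XS"));                  // true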
@@ -795,7 +799,7 @@ var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([
  ]);

  // src/pipelines.ts
- var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ var MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
  var MODALITY_LABELS = {
  multimodal: "Multimodal",
  nlp: "Natural Language Processing",
@@ -4835,9 +4839,9 @@ var gliner = (model) => [
  model = GLiNER.from_pretrained("${model.id}")`
  ];
  var keras = (model) => [
- `# Available backend options are: "jax", "tensorflow", "torch".
+ `# Available backend options are: "jax", "torch", "tensorflow".
  import os
- os.environ["KERAS_BACKEND"] = "tensorflow"
+ os.environ["KERAS_BACKEND"] = "jax"

  import keras

@@ -4845,9 +4849,9 @@ model = keras.saving.load_model("hf://${model.id}")
  `
  ];
  var keras_nlp = (model) => [
- `# Available backend options are: "jax", "tensorflow", "torch".
+ `# Available backend options are: "jax", "torch", "tensorflow".
  import os
- os.environ["KERAS_BACKEND"] = "tensorflow"
+ os.environ["KERAS_BACKEND"] = "jax"

  import keras_nlp

@@ -4855,6 +4859,20 @@ tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
  backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
  `
  ];
+ var keras_hub = (model) => [
+ `# Available backend options are: "jax", "torch", "tensorflow".
+ import os
+ os.environ["KERAS_BACKEND"] = "jax"
+
+ import keras_hub
+
+ # Load a task-specific model (*replace CausalLM with your task*)
+ model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")
+
+ # Possible tasks are CausalLM, TextToImage, ImageClassifier, ...
+ # full list here: https://keras.io/api/keras_hub/models/#api-documentation
+ `
+ ];
  var llama_cpp_python = (model) => [
  `from llama_cpp import Llama

@@ -5769,16 +5787,23 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/keras-team/tf-keras",
  docsUrl: "https://huggingface.co/docs/hub/tf-keras",
  snippets: tf_keras,
- filter: true,
  countDownloads: `path:"saved_model.pb"`
  },
  "keras-nlp": {
  prettyLabel: "KerasNLP",
  repoName: "KerasNLP",
- repoUrl: "https://keras.io/keras_nlp/",
- docsUrl: "https://github.com/keras-team/keras-nlp",
+ repoUrl: "https://github.com/keras-team/keras-nlp",
+ docsUrl: "https://keras.io/keras_nlp/",
  snippets: keras_nlp
  },
+ "keras-hub": {
+ prettyLabel: "KerasHub",
+ repoName: "KerasHub",
+ repoUrl: "https://github.com/keras-team/keras-hub",
+ docsUrl: "https://keras.io/keras_hub/",
+ snippets: keras_hub,
+ filter: true
+ },
  k2: {
  prettyLabel: "K2",
  repoName: "k2",
@@ -5797,6 +5822,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/abetlen/llama-cpp-python",
  snippets: llama_cpp_python
  },
+ "mini-omni2": {
+ prettyLabel: "Mini-Omni2",
+ repoName: "Mini-Omni2",
+ repoUrl: "https://github.com/gpt-omni/mini-omni2",
+ countDownloads: `path:"model_config.yaml"`
+ },
  mindspore: {
  prettyLabel: "MindSpore",
  repoName: "mindspore",
@@ -5880,6 +5911,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: true,
  countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`
  },
+ "open-oasis": {
+ prettyLabel: "open-oasis",
+ repoName: "open-oasis",
+ repoUrl: "https://github.com/etched-ai/open-oasis",
+ countDownloads: `path:"oasis500m.pt"`
+ },
  open_clip: {
  prettyLabel: "OpenCLIP",
  repoName: "OpenCLIP",
@@ -6104,6 +6141,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: false,
  countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
  },
+ genmo: {
+ prettyLabel: "Genmo",
+ repoName: "Genmo",
+ repoUrl: "https://github.com/genmoai/models",
+ filter: false,
+ countDownloads: `path:"vae_stats.json"`
+ },
  tensorflowtts: {
  prettyLabel: "TensorFlowTTS",
  repoName: "TensorFlowTTS",
@@ -6250,7 +6294,28 @@ var inputsQuestionAnswering = () => `{
  }`;
  var inputsTextClassification = () => `"I like you. I love you"`;
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
- var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+ var inputsTextGeneration = (model) => {
+ if (model.tags.includes("conversational")) {
+ return model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence."
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ];
+ }
+ return `"Can you please let us know more details about your "`;
+ };
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
  var inputsSentenceSimilarity = () => `{
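The rewritten inputsTextGeneration now returns one of three shapes depending on the model. Tracing the code above with hypothetical model objects (only tags and pipeline_tag matter):

    inputsTextGeneration({ tags: ["conversational"], pipeline_tag: "text-generation" });
    // -> [{ role: "user", content: "What is the capital of France?" }]

    inputsTextGeneration({ tags: ["conversational"], pipeline_tag: "image-text-to-text" });
    // -> one user message whose content array mixes a text part and an image_url part

    inputsTextGeneration({ tags: [], pipeline_tag: "text-generation" });
    // -> '"Can you please let us know more details about your "' (plain string, as before)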
@@ -6307,13 +6372,15 @@ function getModelInputSnippet(model, noWrap = false, noQuotes = false) {
  const inputs = modelInputSnippets[model.pipeline_tag];
  if (inputs) {
  let result = inputs(model);
- if (noWrap) {
- result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
- }
- if (noQuotes) {
- const REGEX_QUOTES = /^"(.+)"$/s;
- const match = result.match(REGEX_QUOTES);
- result = match ? match[1] : result;
+ if (typeof result === "string") {
+ if (noWrap) {
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+ }
+ if (noQuotes) {
+ const REGEX_QUOTES = /^"(.+)"$/s;
+ const match = result.match(REGEX_QUOTES);
+ result = match ? match[1] : result;
+ }
  }
  return result;
  }
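The new typeof guard exists because conversational snippets now return message arrays rather than strings, and the whitespace/quote post-processing only makes sense for strings. Illustration, with hypothetical model objects:

    // String snippet: old behavior preserved, surrounding quotes stripped by noQuotes.
    getModelInputSnippet(fillMaskModel, false, true);
    // -> The answer to the universe is [MASK].

    // Message-array snippet: returned untouched; noWrap and noQuotes are no-ops.
    getModelInputSnippet(conversationalModel, true, true);
    // -> [{ role: "user", content: "What is the capital of France?" }]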
@@ -6355,7 +6422,7 @@ function stringifyMessages(messages, opts) {
  if (opts.customContentEscaper) {
  content = opts.customContentEscaper(content);
  }
- return `{ ${keyRole}: "${role}", ${keyContent}: ${content} }`;
+ return `{ ${keyRole}: "${role}", ${keyContent}: [${content}] }`;
  }
  });
  return opts.start + messagesStringified.join(opts.sep) + opts.end;
@@ -6376,20 +6443,7 @@ var snippetBasic = (model, accessToken) => ({
  var snippetTextGeneration = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6489,20 +6543,7 @@ __export(python_exports, {
  });
  var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, {
  sep: ",\n ",
@@ -6800,20 +6841,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  var snippetTextGeneration2 = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
  const config = {
@@ -7051,6 +7079,47 @@ function hasJsInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
  }

+ // src/gguf.ts
+ var GGMLQuantizationType = /* @__PURE__ */ ((GGMLQuantizationType2) => {
+ GGMLQuantizationType2[GGMLQuantizationType2["F32"] = 0] = "F32";
+ GGMLQuantizationType2[GGMLQuantizationType2["F16"] = 1] = "F16";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_0"] = 2] = "Q4_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_1"] = 3] = "Q4_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_0"] = 6] = "Q5_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_1"] = 7] = "Q5_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_0"] = 8] = "Q8_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_1"] = 9] = "Q8_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q2_K"] = 10] = "Q2_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q3_K"] = 11] = "Q3_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_K"] = 12] = "Q4_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_K"] = 13] = "Q5_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q6_K"] = 14] = "Q6_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_K"] = 15] = "Q8_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XXS"] = 16] = "IQ2_XXS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XS"] = 17] = "IQ2_XS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_XXS"] = 18] = "IQ3_XXS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_S"] = 19] = "IQ1_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_NL"] = 20] = "IQ4_NL";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_S"] = 21] = "IQ3_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_S"] = 22] = "IQ2_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_XS"] = 23] = "IQ4_XS";
+ GGMLQuantizationType2[GGMLQuantizationType2["I8"] = 24] = "I8";
+ GGMLQuantizationType2[GGMLQuantizationType2["I16"] = 25] = "I16";
+ GGMLQuantizationType2[GGMLQuantizationType2["I32"] = 26] = "I32";
+ GGMLQuantizationType2[GGMLQuantizationType2["I64"] = 27] = "I64";
+ GGMLQuantizationType2[GGMLQuantizationType2["F64"] = 28] = "F64";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_M"] = 29] = "IQ1_M";
+ GGMLQuantizationType2[GGMLQuantizationType2["BF16"] = 30] = "BF16";
+ return GGMLQuantizationType2;
+ })(GGMLQuantizationType || {});
+ var ggufQuants = Object.values(GGMLQuantizationType).filter((v) => typeof v === "string");
+ var GGUF_QUANT_RE = new RegExp(`(?<quant>${ggufQuants.join("|")})(_(?<sizeVariation>[A-Z]+))?`);
+ var GGUF_QUANT_RE_GLOBAL = new RegExp(GGUF_QUANT_RE, "g");
+ function parseGGUFQuantLabel(fname) {
+ const quantLabel = fname.toUpperCase().match(GGUF_QUANT_RE_GLOBAL)?.at(-1);
+ return quantLabel;
+ }
+
  // src/hardware.ts
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14;
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11;
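This is the code previously imported from @huggingface/gguf, now inlined so the built files no longer require that package at runtime (see the removed require/import in the src/local-apps.ts hunks below). A behavior check traced directly from the code above; matching is global and .at(-1) takes the last match, so a quant label at the end of the filename wins over quant-like tokens earlier in the name (filenames are made-up examples):

    parseGGUFQuantLabel("Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"); // "Q4_K_M" (Q4_K plus size variation M)
    parseGGUFQuantLabel("mistral-7b-v0.1.Q5_0.gguf");              // "Q5_0"
    parseGGUFQuantLabel("model.gguf");                             // undefined, no quant label found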
@@ -7509,7 +7578,6 @@ var SKUS = {
  };

  // src/local-apps.ts
- var import_gguf = require("@huggingface/gguf");
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7587,7 +7655,7 @@ var snippetNodeLlamaCppCli = (model, filepath) => {
  };
  var snippetOllama = (model, filepath) => {
  if (filepath) {
- const quantLabel = (0, import_gguf.parseGGUFQuantLabel)(filepath);
+ const quantLabel = parseGGUFQuantLabel(filepath);
  const ollamatag = quantLabel ? `:${quantLabel}` : "";
  return `ollama run hf.co/${model.id}${ollamatag}`;
  }
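End to end, snippetOllama now resolves the Ollama tag with the locally bundled parser. With a hypothetical repo id and file path:

    // Both arguments are made-up examples.
    snippetOllama({ id: "bartowski/Llama-3.1-8B-Instruct-GGUF" }, "Llama-3.1-8B-Instruct-Q4_K_M.gguf");
    // -> "ollama run hf.co/bartowski/Llama-3.1-8B-Instruct-GGUF:Q4_K_M"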
@@ -7891,6 +7959,9 @@ var DATASET_LIBRARIES_UI_ELEMENTS = {
  ALL_MODEL_LIBRARY_KEYS,
  DATASET_LIBRARIES_UI_ELEMENTS,
  DEFAULT_MEMORY_OPTIONS,
+ GGMLQuantizationType,
+ GGUF_QUANT_RE,
+ GGUF_QUANT_RE_GLOBAL,
  LIBRARY_TASK_MAPPING,
  LOCAL_APPS,
  MAPPING_DEFAULT_WIDGET,
@@ -7905,5 +7976,6 @@ var DATASET_LIBRARIES_UI_ELEMENTS = {
  SUBTASK_TYPES,
  TASKS_DATA,
  TASKS_MODEL_LIBRARIES,
+ parseGGUFQuantLabel,
  snippets
  });
package/dist/index.js CHANGED
@@ -757,7 +757,7 @@ var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([
  ]);

  // src/pipelines.ts
- var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ var MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
  var MODALITY_LABELS = {
  multimodal: "Multimodal",
  nlp: "Natural Language Processing",
@@ -4797,9 +4797,9 @@ var gliner = (model) => [
  model = GLiNER.from_pretrained("${model.id}")`
  ];
  var keras = (model) => [
- `# Available backend options are: "jax", "tensorflow", "torch".
+ `# Available backend options are: "jax", "torch", "tensorflow".
  import os
- os.environ["KERAS_BACKEND"] = "tensorflow"
+ os.environ["KERAS_BACKEND"] = "jax"

  import keras

@@ -4807,9 +4807,9 @@ model = keras.saving.load_model("hf://${model.id}")
  `
  ];
  var keras_nlp = (model) => [
- `# Available backend options are: "jax", "tensorflow", "torch".
+ `# Available backend options are: "jax", "torch", "tensorflow".
  import os
- os.environ["KERAS_BACKEND"] = "tensorflow"
+ os.environ["KERAS_BACKEND"] = "jax"

  import keras_nlp

@@ -4817,6 +4817,20 @@ tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
  backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
  `
  ];
+ var keras_hub = (model) => [
+ `# Available backend options are: "jax", "torch", "tensorflow".
+ import os
+ os.environ["KERAS_BACKEND"] = "jax"
+
+ import keras_hub
+
+ # Load a task-specific model (*replace CausalLM with your task*)
+ model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")
+
+ # Possible tasks are CausalLM, TextToImage, ImageClassifier, ...
+ # full list here: https://keras.io/api/keras_hub/models/#api-documentation
+ `
+ ];
  var llama_cpp_python = (model) => [
  `from llama_cpp import Llama

@@ -5731,16 +5745,23 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/keras-team/tf-keras",
  docsUrl: "https://huggingface.co/docs/hub/tf-keras",
  snippets: tf_keras,
- filter: true,
  countDownloads: `path:"saved_model.pb"`
  },
  "keras-nlp": {
  prettyLabel: "KerasNLP",
  repoName: "KerasNLP",
- repoUrl: "https://keras.io/keras_nlp/",
- docsUrl: "https://github.com/keras-team/keras-nlp",
+ repoUrl: "https://github.com/keras-team/keras-nlp",
+ docsUrl: "https://keras.io/keras_nlp/",
  snippets: keras_nlp
  },
+ "keras-hub": {
+ prettyLabel: "KerasHub",
+ repoName: "KerasHub",
+ repoUrl: "https://github.com/keras-team/keras-hub",
+ docsUrl: "https://keras.io/keras_hub/",
+ snippets: keras_hub,
+ filter: true
+ },
  k2: {
  prettyLabel: "K2",
  repoName: "k2",
@@ -5759,6 +5780,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/abetlen/llama-cpp-python",
  snippets: llama_cpp_python
  },
+ "mini-omni2": {
+ prettyLabel: "Mini-Omni2",
+ repoName: "Mini-Omni2",
+ repoUrl: "https://github.com/gpt-omni/mini-omni2",
+ countDownloads: `path:"model_config.yaml"`
+ },
  mindspore: {
  prettyLabel: "MindSpore",
  repoName: "mindspore",
@@ -5842,6 +5869,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: true,
  countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`
  },
+ "open-oasis": {
+ prettyLabel: "open-oasis",
+ repoName: "open-oasis",
+ repoUrl: "https://github.com/etched-ai/open-oasis",
+ countDownloads: `path:"oasis500m.pt"`
+ },
  open_clip: {
  prettyLabel: "OpenCLIP",
  repoName: "OpenCLIP",
@@ -6066,6 +6099,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  filter: false,
  countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
  },
+ genmo: {
+ prettyLabel: "Genmo",
+ repoName: "Genmo",
+ repoUrl: "https://github.com/genmoai/models",
+ filter: false,
+ countDownloads: `path:"vae_stats.json"`
+ },
  tensorflowtts: {
  prettyLabel: "TensorFlowTTS",
  repoName: "TensorFlowTTS",
@@ -6212,7 +6252,28 @@ var inputsQuestionAnswering = () => `{
  }`;
  var inputsTextClassification = () => `"I like you. I love you"`;
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
- var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+ var inputsTextGeneration = (model) => {
+ if (model.tags.includes("conversational")) {
+ return model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence."
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ];
+ }
+ return `"Can you please let us know more details about your "`;
+ };
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
  var inputsSentenceSimilarity = () => `{
@@ -6269,13 +6330,15 @@ function getModelInputSnippet(model, noWrap = false, noQuotes = false) {
  const inputs = modelInputSnippets[model.pipeline_tag];
  if (inputs) {
  let result = inputs(model);
- if (noWrap) {
- result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
- }
- if (noQuotes) {
- const REGEX_QUOTES = /^"(.+)"$/s;
- const match = result.match(REGEX_QUOTES);
- result = match ? match[1] : result;
+ if (typeof result === "string") {
+ if (noWrap) {
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+ }
+ if (noQuotes) {
+ const REGEX_QUOTES = /^"(.+)"$/s;
+ const match = result.match(REGEX_QUOTES);
+ result = match ? match[1] : result;
+ }
  }
  return result;
  }
@@ -6317,7 +6380,7 @@ function stringifyMessages(messages, opts) {
  if (opts.customContentEscaper) {
  content = opts.customContentEscaper(content);
  }
- return `{ ${keyRole}: "${role}", ${keyContent}: ${content} }`;
+ return `{ ${keyRole}: "${role}", ${keyContent}: [${content}] }`;
  }
  });
  return opts.start + messagesStringified.join(opts.sep) + opts.end;
@@ -6338,20 +6401,7 @@ var snippetBasic = (model, accessToken) => ({
  var snippetTextGeneration = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6451,20 +6501,7 @@ __export(python_exports, {
  });
  var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, {
  sep: ",\n ",
@@ -6762,20 +6799,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  var snippetTextGeneration2 = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
- {
- role: "user",
- content: [
- {
- type: "image_url",
- image_url: {
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
- }
- },
- { type: "text", text: "Describe this image in one sentence." }
- ]
- }
- ];
+ const exampleMessages = getModelInputSnippet(model);
  const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
  const config = {
@@ -7013,6 +7037,47 @@ function hasJsInferenceSnippet(model) {
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
  }

+ // src/gguf.ts
+ var GGMLQuantizationType = /* @__PURE__ */ ((GGMLQuantizationType2) => {
+ GGMLQuantizationType2[GGMLQuantizationType2["F32"] = 0] = "F32";
+ GGMLQuantizationType2[GGMLQuantizationType2["F16"] = 1] = "F16";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_0"] = 2] = "Q4_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_1"] = 3] = "Q4_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_0"] = 6] = "Q5_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_1"] = 7] = "Q5_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_0"] = 8] = "Q8_0";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_1"] = 9] = "Q8_1";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q2_K"] = 10] = "Q2_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q3_K"] = 11] = "Q3_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_K"] = 12] = "Q4_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_K"] = 13] = "Q5_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q6_K"] = 14] = "Q6_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_K"] = 15] = "Q8_K";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XXS"] = 16] = "IQ2_XXS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XS"] = 17] = "IQ2_XS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_XXS"] = 18] = "IQ3_XXS";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_S"] = 19] = "IQ1_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_NL"] = 20] = "IQ4_NL";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_S"] = 21] = "IQ3_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_S"] = 22] = "IQ2_S";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_XS"] = 23] = "IQ4_XS";
+ GGMLQuantizationType2[GGMLQuantizationType2["I8"] = 24] = "I8";
+ GGMLQuantizationType2[GGMLQuantizationType2["I16"] = 25] = "I16";
+ GGMLQuantizationType2[GGMLQuantizationType2["I32"] = 26] = "I32";
+ GGMLQuantizationType2[GGMLQuantizationType2["I64"] = 27] = "I64";
+ GGMLQuantizationType2[GGMLQuantizationType2["F64"] = 28] = "F64";
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_M"] = 29] = "IQ1_M";
+ GGMLQuantizationType2[GGMLQuantizationType2["BF16"] = 30] = "BF16";
+ return GGMLQuantizationType2;
+ })(GGMLQuantizationType || {});
+ var ggufQuants = Object.values(GGMLQuantizationType).filter((v) => typeof v === "string");
+ var GGUF_QUANT_RE = new RegExp(`(?<quant>${ggufQuants.join("|")})(_(?<sizeVariation>[A-Z]+))?`);
+ var GGUF_QUANT_RE_GLOBAL = new RegExp(GGUF_QUANT_RE, "g");
+ function parseGGUFQuantLabel(fname) {
+ const quantLabel = fname.toUpperCase().match(GGUF_QUANT_RE_GLOBAL)?.at(-1);
+ return quantLabel;
+ }
+
  // src/hardware.ts
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14;
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11;
@@ -7471,7 +7536,6 @@ var SKUS = {
  };

  // src/local-apps.ts
- import { parseGGUFQuantLabel } from "@huggingface/gguf";
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7852,6 +7916,9 @@ export {
  ALL_MODEL_LIBRARY_KEYS,
  DATASET_LIBRARIES_UI_ELEMENTS,
  DEFAULT_MEMORY_OPTIONS,
+ GGMLQuantizationType,
+ GGUF_QUANT_RE,
+ GGUF_QUANT_RE_GLOBAL,
  LIBRARY_TASK_MAPPING,
  LOCAL_APPS,
  MAPPING_DEFAULT_WIDGET,
@@ -7866,5 +7933,6 @@ export {
  SUBTASK_TYPES,
  TASKS_DATA,
  TASKS_MODEL_LIBRARIES,
+ parseGGUFQuantLabel,
  snippets_exports as snippets
  };