@huggingface/tasks 0.12.28 → 0.12.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37):
  1. package/dist/index.cjs +113 -59
  2. package/dist/index.js +108 -58
  3. package/dist/src/gguf.d.ts +35 -0
  4. package/dist/src/gguf.d.ts.map +1 -0
  5. package/dist/src/index.d.ts +1 -0
  6. package/dist/src/index.d.ts.map +1 -1
  7. package/dist/src/local-apps.d.ts.map +1 -1
  8. package/dist/src/model-libraries-snippets.d.ts +1 -0
  9. package/dist/src/model-libraries-snippets.d.ts.map +1 -1
  10. package/dist/src/model-libraries.d.ts +23 -3
  11. package/dist/src/model-libraries.d.ts.map +1 -1
  12. package/dist/src/snippets/common.d.ts +6 -12
  13. package/dist/src/snippets/common.d.ts.map +1 -1
  14. package/dist/src/snippets/curl.d.ts.map +1 -1
  15. package/dist/src/snippets/curl.spec.d.ts +2 -0
  16. package/dist/src/snippets/curl.spec.d.ts.map +1 -0
  17. package/dist/src/snippets/inputs.d.ts.map +1 -1
  18. package/dist/src/snippets/js.d.ts.map +1 -1
  19. package/dist/src/snippets/js.spec.d.ts +2 -0
  20. package/dist/src/snippets/js.spec.d.ts.map +1 -0
  21. package/dist/src/snippets/python.d.ts.map +1 -1
  22. package/dist/src/snippets/python.spec.d.ts +2 -0
  23. package/dist/src/snippets/python.spec.d.ts.map +1 -0
  24. package/package.json +2 -4
  25. package/src/gguf.ts +40 -0
  26. package/src/index.ts +2 -0
  27. package/src/local-apps.ts +1 -1
  28. package/src/model-libraries-snippets.ts +19 -4
  29. package/src/model-libraries.ts +23 -3
  30. package/src/snippets/common.ts +27 -51
  31. package/src/snippets/curl.spec.ts +68 -0
  32. package/src/snippets/curl.ts +2 -6
  33. package/src/snippets/inputs.ts +1 -0
  34. package/src/snippets/js.spec.ts +86 -0
  35. package/src/snippets/js.ts +2 -4
  36. package/src/snippets/python.spec.ts +78 -0
  37. package/src/snippets/python.ts +4 -11
package/dist/index.cjs CHANGED
@@ -24,6 +24,9 @@ __export(src_exports, {
24
24
  ALL_MODEL_LIBRARY_KEYS: () => ALL_MODEL_LIBRARY_KEYS,
25
25
  DATASET_LIBRARIES_UI_ELEMENTS: () => DATASET_LIBRARIES_UI_ELEMENTS,
26
26
  DEFAULT_MEMORY_OPTIONS: () => DEFAULT_MEMORY_OPTIONS,
27
+ GGMLQuantizationType: () => GGMLQuantizationType,
28
+ GGUF_QUANT_RE: () => GGUF_QUANT_RE,
29
+ GGUF_QUANT_RE_GLOBAL: () => GGUF_QUANT_RE_GLOBAL,
27
30
  LIBRARY_TASK_MAPPING: () => LIBRARY_TASK_MAPPING,
28
31
  LOCAL_APPS: () => LOCAL_APPS,
29
32
  MAPPING_DEFAULT_WIDGET: () => MAPPING_DEFAULT_WIDGET,
@@ -38,6 +41,7 @@ __export(src_exports, {
38
41
  SUBTASK_TYPES: () => SUBTASK_TYPES,
39
42
  TASKS_DATA: () => TASKS_DATA,
40
43
  TASKS_MODEL_LIBRARIES: () => TASKS_MODEL_LIBRARIES,
44
+ parseGGUFQuantLabel: () => parseGGUFQuantLabel,
41
45
  snippets: () => snippets_exports
42
46
  });
43
47
  module.exports = __toCommonJS(src_exports);
@@ -4835,9 +4839,9 @@ var gliner = (model) => [
4835
4839
  model = GLiNER.from_pretrained("${model.id}")`
4836
4840
  ];
4837
4841
  var keras = (model) => [
4838
- `# Available backend options are: "jax", "tensorflow", "torch".
4842
+ `# Available backend options are: "jax", "torch", "tensorflow".
4839
4843
  import os
4840
- os.environ["KERAS_BACKEND"] = "tensorflow"
4844
+ os.environ["KERAS_BACKEND"] = "jax"
4841
4845
 
4842
4846
  import keras
4843
4847
 
@@ -4845,9 +4849,9 @@ model = keras.saving.load_model("hf://${model.id}")
4845
4849
  `
4846
4850
  ];
4847
4851
  var keras_nlp = (model) => [
4848
- `# Available backend options are: "jax", "tensorflow", "torch".
4852
+ `# Available backend options are: "jax", "torch", "tensorflow".
4849
4853
  import os
4850
- os.environ["KERAS_BACKEND"] = "tensorflow"
4854
+ os.environ["KERAS_BACKEND"] = "jax"
4851
4855
 
4852
4856
  import keras_nlp
4853
4857
 
@@ -4855,6 +4859,20 @@ tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
4855
4859
  backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
4856
4860
  `
4857
4861
  ];
4862
+ var keras_hub = (model) => [
4863
+ `# Available backend options are: "jax", "torch", "tensorflow".
4864
+ import os
4865
+ os.environ["KERAS_BACKEND"] = "jax"
4866
+
4867
+ import keras_hub
4868
+
4869
+ # Load a task-specific model (*replace CausalLM with your task*)
4870
+ model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")
4871
+
4872
+ # Possible tasks are CausalLM, TextToImage, ImageClassifier, ...
4873
+ # full list here: https://keras.io/api/keras_hub/models/#api-documentation
4874
+ `
4875
+ ];
4858
4876
  var llama_cpp_python = (model) => [
4859
4877
  `from llama_cpp import Llama
4860
4878
 
@@ -5769,16 +5787,23 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
5769
5787
  repoUrl: "https://github.com/keras-team/tf-keras",
5770
5788
  docsUrl: "https://huggingface.co/docs/hub/tf-keras",
5771
5789
  snippets: tf_keras,
5772
- filter: true,
5773
5790
  countDownloads: `path:"saved_model.pb"`
5774
5791
  },
5775
5792
  "keras-nlp": {
5776
5793
  prettyLabel: "KerasNLP",
5777
5794
  repoName: "KerasNLP",
5778
- repoUrl: "https://keras.io/keras_nlp/",
5779
- docsUrl: "https://github.com/keras-team/keras-nlp",
5795
+ repoUrl: "https://github.com/keras-team/keras-nlp",
5796
+ docsUrl: "https://keras.io/keras_nlp/",
5780
5797
  snippets: keras_nlp
5781
5798
  },
5799
+ "keras-hub": {
5800
+ prettyLabel: "KerasHub",
5801
+ repoName: "KerasHub",
5802
+ repoUrl: "https://github.com/keras-team/keras-hub",
5803
+ docsUrl: "https://keras.io/keras_hub/",
5804
+ snippets: keras_hub,
5805
+ filter: true
5806
+ },
5782
5807
  k2: {
5783
5808
  prettyLabel: "K2",
5784
5809
  repoName: "k2",
@@ -5886,6 +5911,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
5886
5911
  filter: true,
5887
5912
  countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`
5888
5913
  },
5914
+ "open-oasis": {
5915
+ prettyLabel: "open-oasis",
5916
+ repoName: "open-oasis",
5917
+ repoUrl: "https://github.com/etched-ai/open-oasis",
5918
+ countDownloads: `path:"oasis500m.pt"`
5919
+ },
5889
5920
  open_clip: {
5890
5921
  prettyLabel: "OpenCLIP",
5891
5922
  repoName: "OpenCLIP",
@@ -6110,6 +6141,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
6110
6141
  filter: false,
6111
6142
  countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
6112
6143
  },
6144
+ genmo: {
6145
+ prettyLabel: "Genmo",
6146
+ repoName: "Genmo",
6147
+ repoUrl: "https://github.com/genmoai/models",
6148
+ filter: false,
6149
+ countDownloads: `path:"vae_stats.json"`
6150
+ },
6113
6151
  tensorflowtts: {
6114
6152
  prettyLabel: "TensorFlowTTS",
6115
6153
  repoName: "TensorFlowTTS",
@@ -6320,6 +6358,7 @@ var modelInputSnippets = {
6320
6358
  "tabular-classification": inputsTabularPrediction,
6321
6359
  "text-classification": inputsTextClassification,
6322
6360
  "text-generation": inputsTextGeneration,
6361
+ "image-text-to-text": inputsTextGeneration,
6323
6362
  "text-to-image": inputsTextToImage,
6324
6363
  "text-to-speech": inputsTextToSpeech,
6325
6364
  "text-to-audio": inputsTextToAudio,
@@ -6364,34 +6403,22 @@ __export(curl_exports, {
6364
6403
 
6365
6404
  // src/snippets/common.ts
6366
6405
  function stringifyMessages(messages, opts) {
6367
- const keyRole = opts.attributeKeyQuotes ? `"role"` : "role";
6368
- const keyContent = opts.attributeKeyQuotes ? `"content"` : "content";
6369
- const messagesStringified = messages.map(({ role, content }) => {
6370
- if (typeof content === "string") {
6371
- content = JSON.stringify(content).slice(1, -1);
6372
- if (opts.customContentEscaper) {
6373
- content = opts.customContentEscaper(content);
6374
- }
6375
- return `{ ${keyRole}: "${role}", ${keyContent}: "${content}" }`;
6376
- } else {
6377
- 2;
6378
- content = content.map(({ image_url, text, type }) => ({
6379
- type,
6380
- image_url,
6381
- ...text ? { text: JSON.stringify(text).slice(1, -1) } : void 0
6382
- }));
6383
- content = JSON.stringify(content).slice(1, -1);
6384
- if (opts.customContentEscaper) {
6385
- content = opts.customContentEscaper(content);
6386
- }
6387
- return `{ ${keyRole}: "${role}", ${keyContent}: ${content} }`;
6388
- }
6389
- });
6390
- return opts.start + messagesStringified.join(opts.sep) + opts.end;
6406
+ let messagesStr = JSON.stringify(messages, null, " ");
6407
+ if (opts?.indent) {
6408
+ messagesStr = messagesStr.replaceAll("\n", `
6409
+ ${opts.indent}`);
6410
+ }
6411
+ if (!opts?.attributeKeyQuotes) {
6412
+ messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
6413
+ }
6414
+ if (opts?.customContentEscaper) {
6415
+ messagesStr = opts.customContentEscaper(messagesStr);
6416
+ }
6417
+ return messagesStr;
6391
6418
  }
6392
6419
  function stringifyGenerationConfig(config, opts) {
6393
6420
  const quote = opts.attributeKeyQuotes ? `"` : "";
6394
- return opts.start + Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(opts.sep) + opts.end;
6421
+ return Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(`,${opts.indent}`);
6395
6422
  }
6396
6423
 
6397
6424
  // src/snippets/curl.ts
@@ -6419,18 +6446,12 @@ var snippetTextGeneration = (model, accessToken, opts) => {
6419
6446
  --data '{
6420
6447
  "model": "${model.id}",
6421
6448
  "messages": ${stringifyMessages(messages, {
6422
- sep: ",\n ",
6423
- start: `[
6424
- `,
6425
- end: `
6426
- ]`,
6449
+ indent: " ",
6427
6450
  attributeKeyQuotes: true,
6428
6451
  customContentEscaper: (str) => str.replace(/'/g, "'\\''")
6429
6452
  })},
6430
6453
  ${stringifyGenerationConfig(config, {
6431
- sep: ",\n ",
6432
- start: "",
6433
- end: "",
6454
+ indent: "\n ",
6434
6455
  attributeKeyQuotes: true,
6435
6456
  attributeValueConnector: ": "
6436
6457
  })},
@@ -6507,23 +6528,14 @@ var snippetConversational = (model, accessToken, opts) => {
6507
6528
  const streaming = opts?.streaming ?? true;
6508
6529
  const exampleMessages = getModelInputSnippet(model);
6509
6530
  const messages = opts?.messages ?? exampleMessages;
6510
- const messagesStr = stringifyMessages(messages, {
6511
- sep: ",\n ",
6512
- start: `[
6513
- `,
6514
- end: `
6515
- ]`,
6516
- attributeKeyQuotes: true
6517
- });
6531
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
6518
6532
  const config = {
6519
6533
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
6520
6534
  max_tokens: opts?.max_tokens ?? 500,
6521
6535
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
6522
6536
  };
6523
6537
  const configStr = stringifyGenerationConfig(config, {
6524
- sep: ",\n ",
6525
- start: "",
6526
- end: "",
6538
+ indent: "\n ",
6527
6539
  attributeValueConnector: "="
6528
6540
  });
6529
6541
  if (streaming) {
@@ -6544,7 +6556,7 @@ stream = client.chat.completions.create(
6544
6556
  )
6545
6557
 
6546
6558
  for chunk in stream:
6547
- print(chunk.choices[0].delta.content)`
6559
+ print(chunk.choices[0].delta.content, end="")`
6548
6560
  },
6549
6561
  {
6550
6562
  client: "openai",
@@ -6565,7 +6577,7 @@ stream = client.chat.completions.create(
6565
6577
  )
6566
6578
 
6567
6579
  for chunk in stream:
6568
- print(chunk.choices[0].delta.content)`
6580
+ print(chunk.choices[0].delta.content, end="")`
6569
6581
  }
6570
6582
  ];
6571
6583
  } else {
@@ -6805,16 +6817,14 @@ var snippetTextGeneration2 = (model, accessToken, opts) => {
6805
6817
  const streaming = opts?.streaming ?? true;
6806
6818
  const exampleMessages = getModelInputSnippet(model);
6807
6819
  const messages = opts?.messages ?? exampleMessages;
6808
- const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
6820
+ const messagesStr = stringifyMessages(messages, { indent: " " });
6809
6821
  const config = {
6810
6822
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
6811
6823
  max_tokens: opts?.max_tokens ?? 500,
6812
6824
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
6813
6825
  };
6814
6826
  const configStr = stringifyGenerationConfig(config, {
6815
- sep: ",\n ",
6816
- start: "",
6817
- end: "",
6827
+ indent: "\n ",
6818
6828
  attributeValueConnector: ": "
6819
6829
  });
6820
6830
  if (streaming) {
@@ -7041,6 +7051,47 @@ function hasJsInferenceSnippet(model) {
7041
7051
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
7042
7052
  }
7043
7053
 
7054
+ // src/gguf.ts
7055
+ var GGMLQuantizationType = /* @__PURE__ */ ((GGMLQuantizationType2) => {
7056
+ GGMLQuantizationType2[GGMLQuantizationType2["F32"] = 0] = "F32";
7057
+ GGMLQuantizationType2[GGMLQuantizationType2["F16"] = 1] = "F16";
7058
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_0"] = 2] = "Q4_0";
7059
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_1"] = 3] = "Q4_1";
7060
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_0"] = 6] = "Q5_0";
7061
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_1"] = 7] = "Q5_1";
7062
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_0"] = 8] = "Q8_0";
7063
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_1"] = 9] = "Q8_1";
7064
+ GGMLQuantizationType2[GGMLQuantizationType2["Q2_K"] = 10] = "Q2_K";
7065
+ GGMLQuantizationType2[GGMLQuantizationType2["Q3_K"] = 11] = "Q3_K";
7066
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_K"] = 12] = "Q4_K";
7067
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_K"] = 13] = "Q5_K";
7068
+ GGMLQuantizationType2[GGMLQuantizationType2["Q6_K"] = 14] = "Q6_K";
7069
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_K"] = 15] = "Q8_K";
7070
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XXS"] = 16] = "IQ2_XXS";
7071
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XS"] = 17] = "IQ2_XS";
7072
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_XXS"] = 18] = "IQ3_XXS";
7073
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_S"] = 19] = "IQ1_S";
7074
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_NL"] = 20] = "IQ4_NL";
7075
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_S"] = 21] = "IQ3_S";
7076
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_S"] = 22] = "IQ2_S";
7077
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_XS"] = 23] = "IQ4_XS";
7078
+ GGMLQuantizationType2[GGMLQuantizationType2["I8"] = 24] = "I8";
7079
+ GGMLQuantizationType2[GGMLQuantizationType2["I16"] = 25] = "I16";
7080
+ GGMLQuantizationType2[GGMLQuantizationType2["I32"] = 26] = "I32";
7081
+ GGMLQuantizationType2[GGMLQuantizationType2["I64"] = 27] = "I64";
7082
+ GGMLQuantizationType2[GGMLQuantizationType2["F64"] = 28] = "F64";
7083
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_M"] = 29] = "IQ1_M";
7084
+ GGMLQuantizationType2[GGMLQuantizationType2["BF16"] = 30] = "BF16";
7085
+ return GGMLQuantizationType2;
7086
+ })(GGMLQuantizationType || {});
7087
+ var ggufQuants = Object.values(GGMLQuantizationType).filter((v) => typeof v === "string");
7088
+ var GGUF_QUANT_RE = new RegExp(`(?<quant>${ggufQuants.join("|")})(_(?<sizeVariation>[A-Z]+))?`);
7089
+ var GGUF_QUANT_RE_GLOBAL = new RegExp(GGUF_QUANT_RE, "g");
7090
+ function parseGGUFQuantLabel(fname) {
7091
+ const quantLabel = fname.toUpperCase().match(GGUF_QUANT_RE_GLOBAL)?.at(-1);
7092
+ return quantLabel;
7093
+ }
7094
+
7044
7095
  // src/hardware.ts
7045
7096
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14;
7046
7097
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11;
@@ -7499,7 +7550,6 @@ var SKUS = {
7499
7550
  };
7500
7551
 
7501
7552
  // src/local-apps.ts
7502
- var import_gguf = require("@huggingface/gguf");
7503
7553
  function isAwqModel(model) {
7504
7554
  return model.config?.quantization_config?.quant_method === "awq";
7505
7555
  }
@@ -7577,7 +7627,7 @@ var snippetNodeLlamaCppCli = (model, filepath) => {
7577
7627
  };
7578
7628
  var snippetOllama = (model, filepath) => {
7579
7629
  if (filepath) {
7580
- const quantLabel = (0, import_gguf.parseGGUFQuantLabel)(filepath);
7630
+ const quantLabel = parseGGUFQuantLabel(filepath);
7581
7631
  const ollamatag = quantLabel ? `:${quantLabel}` : "";
7582
7632
  return `ollama run hf.co/${model.id}${ollamatag}`;
7583
7633
  }
@@ -7881,6 +7931,9 @@ var DATASET_LIBRARIES_UI_ELEMENTS = {
7881
7931
  ALL_MODEL_LIBRARY_KEYS,
7882
7932
  DATASET_LIBRARIES_UI_ELEMENTS,
7883
7933
  DEFAULT_MEMORY_OPTIONS,
7934
+ GGMLQuantizationType,
7935
+ GGUF_QUANT_RE,
7936
+ GGUF_QUANT_RE_GLOBAL,
7884
7937
  LIBRARY_TASK_MAPPING,
7885
7938
  LOCAL_APPS,
7886
7939
  MAPPING_DEFAULT_WIDGET,
@@ -7895,5 +7948,6 @@ var DATASET_LIBRARIES_UI_ELEMENTS = {
7895
7948
  SUBTASK_TYPES,
7896
7949
  TASKS_DATA,
7897
7950
  TASKS_MODEL_LIBRARIES,
7951
+ parseGGUFQuantLabel,
7898
7952
  snippets
7899
7953
  });
package/dist/index.js CHANGED
@@ -4797,9 +4797,9 @@ var gliner = (model) => [
4797
4797
  model = GLiNER.from_pretrained("${model.id}")`
4798
4798
  ];
4799
4799
  var keras = (model) => [
4800
- `# Available backend options are: "jax", "tensorflow", "torch".
4800
+ `# Available backend options are: "jax", "torch", "tensorflow".
4801
4801
  import os
4802
- os.environ["KERAS_BACKEND"] = "tensorflow"
4802
+ os.environ["KERAS_BACKEND"] = "jax"
4803
4803
 
4804
4804
  import keras
4805
4805
 
@@ -4807,9 +4807,9 @@ model = keras.saving.load_model("hf://${model.id}")
4807
4807
  `
4808
4808
  ];
4809
4809
  var keras_nlp = (model) => [
4810
- `# Available backend options are: "jax", "tensorflow", "torch".
4810
+ `# Available backend options are: "jax", "torch", "tensorflow".
4811
4811
  import os
4812
- os.environ["KERAS_BACKEND"] = "tensorflow"
4812
+ os.environ["KERAS_BACKEND"] = "jax"
4813
4813
 
4814
4814
  import keras_nlp
4815
4815
 
@@ -4817,6 +4817,20 @@ tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
4817
4817
  backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
4818
4818
  `
4819
4819
  ];
4820
+ var keras_hub = (model) => [
4821
+ `# Available backend options are: "jax", "torch", "tensorflow".
4822
+ import os
4823
+ os.environ["KERAS_BACKEND"] = "jax"
4824
+
4825
+ import keras_hub
4826
+
4827
+ # Load a task-specific model (*replace CausalLM with your task*)
4828
+ model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")
4829
+
4830
+ # Possible tasks are CausalLM, TextToImage, ImageClassifier, ...
4831
+ # full list here: https://keras.io/api/keras_hub/models/#api-documentation
4832
+ `
4833
+ ];
4820
4834
  var llama_cpp_python = (model) => [
4821
4835
  `from llama_cpp import Llama
4822
4836
 
@@ -5731,16 +5745,23 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
5731
5745
  repoUrl: "https://github.com/keras-team/tf-keras",
5732
5746
  docsUrl: "https://huggingface.co/docs/hub/tf-keras",
5733
5747
  snippets: tf_keras,
5734
- filter: true,
5735
5748
  countDownloads: `path:"saved_model.pb"`
5736
5749
  },
5737
5750
  "keras-nlp": {
5738
5751
  prettyLabel: "KerasNLP",
5739
5752
  repoName: "KerasNLP",
5740
- repoUrl: "https://keras.io/keras_nlp/",
5741
- docsUrl: "https://github.com/keras-team/keras-nlp",
5753
+ repoUrl: "https://github.com/keras-team/keras-nlp",
5754
+ docsUrl: "https://keras.io/keras_nlp/",
5742
5755
  snippets: keras_nlp
5743
5756
  },
5757
+ "keras-hub": {
5758
+ prettyLabel: "KerasHub",
5759
+ repoName: "KerasHub",
5760
+ repoUrl: "https://github.com/keras-team/keras-hub",
5761
+ docsUrl: "https://keras.io/keras_hub/",
5762
+ snippets: keras_hub,
5763
+ filter: true
5764
+ },
5744
5765
  k2: {
5745
5766
  prettyLabel: "K2",
5746
5767
  repoName: "k2",
@@ -5848,6 +5869,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
5848
5869
  filter: true,
5849
5870
  countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`
5850
5871
  },
5872
+ "open-oasis": {
5873
+ prettyLabel: "open-oasis",
5874
+ repoName: "open-oasis",
5875
+ repoUrl: "https://github.com/etched-ai/open-oasis",
5876
+ countDownloads: `path:"oasis500m.pt"`
5877
+ },
5851
5878
  open_clip: {
5852
5879
  prettyLabel: "OpenCLIP",
5853
5880
  repoName: "OpenCLIP",
@@ -6072,6 +6099,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
6072
6099
  filter: false,
6073
6100
  countDownloads: `path_extension:"safetensors" OR path_extension:"pt"`
6074
6101
  },
6102
+ genmo: {
6103
+ prettyLabel: "Genmo",
6104
+ repoName: "Genmo",
6105
+ repoUrl: "https://github.com/genmoai/models",
6106
+ filter: false,
6107
+ countDownloads: `path:"vae_stats.json"`
6108
+ },
6075
6109
  tensorflowtts: {
6076
6110
  prettyLabel: "TensorFlowTTS",
6077
6111
  repoName: "TensorFlowTTS",
@@ -6282,6 +6316,7 @@ var modelInputSnippets = {
6282
6316
  "tabular-classification": inputsTabularPrediction,
6283
6317
  "text-classification": inputsTextClassification,
6284
6318
  "text-generation": inputsTextGeneration,
6319
+ "image-text-to-text": inputsTextGeneration,
6285
6320
  "text-to-image": inputsTextToImage,
6286
6321
  "text-to-speech": inputsTextToSpeech,
6287
6322
  "text-to-audio": inputsTextToAudio,
@@ -6326,34 +6361,22 @@ __export(curl_exports, {
6326
6361
 
6327
6362
  // src/snippets/common.ts
6328
6363
  function stringifyMessages(messages, opts) {
6329
- const keyRole = opts.attributeKeyQuotes ? `"role"` : "role";
6330
- const keyContent = opts.attributeKeyQuotes ? `"content"` : "content";
6331
- const messagesStringified = messages.map(({ role, content }) => {
6332
- if (typeof content === "string") {
6333
- content = JSON.stringify(content).slice(1, -1);
6334
- if (opts.customContentEscaper) {
6335
- content = opts.customContentEscaper(content);
6336
- }
6337
- return `{ ${keyRole}: "${role}", ${keyContent}: "${content}" }`;
6338
- } else {
6339
- 2;
6340
- content = content.map(({ image_url, text, type }) => ({
6341
- type,
6342
- image_url,
6343
- ...text ? { text: JSON.stringify(text).slice(1, -1) } : void 0
6344
- }));
6345
- content = JSON.stringify(content).slice(1, -1);
6346
- if (opts.customContentEscaper) {
6347
- content = opts.customContentEscaper(content);
6348
- }
6349
- return `{ ${keyRole}: "${role}", ${keyContent}: ${content} }`;
6350
- }
6351
- });
6352
- return opts.start + messagesStringified.join(opts.sep) + opts.end;
6364
+ let messagesStr = JSON.stringify(messages, null, " ");
6365
+ if (opts?.indent) {
6366
+ messagesStr = messagesStr.replaceAll("\n", `
6367
+ ${opts.indent}`);
6368
+ }
6369
+ if (!opts?.attributeKeyQuotes) {
6370
+ messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
6371
+ }
6372
+ if (opts?.customContentEscaper) {
6373
+ messagesStr = opts.customContentEscaper(messagesStr);
6374
+ }
6375
+ return messagesStr;
6353
6376
  }
6354
6377
  function stringifyGenerationConfig(config, opts) {
6355
6378
  const quote = opts.attributeKeyQuotes ? `"` : "";
6356
- return opts.start + Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(opts.sep) + opts.end;
6379
+ return Object.entries(config).map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`).join(`,${opts.indent}`);
6357
6380
  }
6358
6381
 
6359
6382
  // src/snippets/curl.ts
@@ -6381,18 +6404,12 @@ var snippetTextGeneration = (model, accessToken, opts) => {
6381
6404
  --data '{
6382
6405
  "model": "${model.id}",
6383
6406
  "messages": ${stringifyMessages(messages, {
6384
- sep: ",\n ",
6385
- start: `[
6386
- `,
6387
- end: `
6388
- ]`,
6407
+ indent: " ",
6389
6408
  attributeKeyQuotes: true,
6390
6409
  customContentEscaper: (str) => str.replace(/'/g, "'\\''")
6391
6410
  })},
6392
6411
  ${stringifyGenerationConfig(config, {
6393
- sep: ",\n ",
6394
- start: "",
6395
- end: "",
6412
+ indent: "\n ",
6396
6413
  attributeKeyQuotes: true,
6397
6414
  attributeValueConnector: ": "
6398
6415
  })},
@@ -6469,23 +6486,14 @@ var snippetConversational = (model, accessToken, opts) => {
6469
6486
  const streaming = opts?.streaming ?? true;
6470
6487
  const exampleMessages = getModelInputSnippet(model);
6471
6488
  const messages = opts?.messages ?? exampleMessages;
6472
- const messagesStr = stringifyMessages(messages, {
6473
- sep: ",\n ",
6474
- start: `[
6475
- `,
6476
- end: `
6477
- ]`,
6478
- attributeKeyQuotes: true
6479
- });
6489
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
6480
6490
  const config = {
6481
6491
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
6482
6492
  max_tokens: opts?.max_tokens ?? 500,
6483
6493
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
6484
6494
  };
6485
6495
  const configStr = stringifyGenerationConfig(config, {
6486
- sep: ",\n ",
6487
- start: "",
6488
- end: "",
6496
+ indent: "\n ",
6489
6497
  attributeValueConnector: "="
6490
6498
  });
6491
6499
  if (streaming) {
@@ -6506,7 +6514,7 @@ stream = client.chat.completions.create(
6506
6514
  )
6507
6515
 
6508
6516
  for chunk in stream:
6509
- print(chunk.choices[0].delta.content)`
6517
+ print(chunk.choices[0].delta.content, end="")`
6510
6518
  },
6511
6519
  {
6512
6520
  client: "openai",
@@ -6527,7 +6535,7 @@ stream = client.chat.completions.create(
6527
6535
  )
6528
6536
 
6529
6537
  for chunk in stream:
6530
- print(chunk.choices[0].delta.content)`
6538
+ print(chunk.choices[0].delta.content, end="")`
6531
6539
  }
6532
6540
  ];
6533
6541
  } else {
@@ -6767,16 +6775,14 @@ var snippetTextGeneration2 = (model, accessToken, opts) => {
6767
6775
  const streaming = opts?.streaming ?? true;
6768
6776
  const exampleMessages = getModelInputSnippet(model);
6769
6777
  const messages = opts?.messages ?? exampleMessages;
6770
- const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
6778
+ const messagesStr = stringifyMessages(messages, { indent: " " });
6771
6779
  const config = {
6772
6780
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
6773
6781
  max_tokens: opts?.max_tokens ?? 500,
6774
6782
  ...opts?.top_p ? { top_p: opts.top_p } : void 0
6775
6783
  };
6776
6784
  const configStr = stringifyGenerationConfig(config, {
6777
- sep: ",\n ",
6778
- start: "",
6779
- end: "",
6785
+ indent: "\n ",
6780
6786
  attributeValueConnector: ": "
6781
6787
  });
6782
6788
  if (streaming) {
@@ -7003,6 +7009,47 @@ function hasJsInferenceSnippet(model) {
7003
7009
  return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
7004
7010
  }
7005
7011
 
7012
+ // src/gguf.ts
7013
+ var GGMLQuantizationType = /* @__PURE__ */ ((GGMLQuantizationType2) => {
7014
+ GGMLQuantizationType2[GGMLQuantizationType2["F32"] = 0] = "F32";
7015
+ GGMLQuantizationType2[GGMLQuantizationType2["F16"] = 1] = "F16";
7016
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_0"] = 2] = "Q4_0";
7017
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_1"] = 3] = "Q4_1";
7018
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_0"] = 6] = "Q5_0";
7019
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_1"] = 7] = "Q5_1";
7020
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_0"] = 8] = "Q8_0";
7021
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_1"] = 9] = "Q8_1";
7022
+ GGMLQuantizationType2[GGMLQuantizationType2["Q2_K"] = 10] = "Q2_K";
7023
+ GGMLQuantizationType2[GGMLQuantizationType2["Q3_K"] = 11] = "Q3_K";
7024
+ GGMLQuantizationType2[GGMLQuantizationType2["Q4_K"] = 12] = "Q4_K";
7025
+ GGMLQuantizationType2[GGMLQuantizationType2["Q5_K"] = 13] = "Q5_K";
7026
+ GGMLQuantizationType2[GGMLQuantizationType2["Q6_K"] = 14] = "Q6_K";
7027
+ GGMLQuantizationType2[GGMLQuantizationType2["Q8_K"] = 15] = "Q8_K";
7028
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XXS"] = 16] = "IQ2_XXS";
7029
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_XS"] = 17] = "IQ2_XS";
7030
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_XXS"] = 18] = "IQ3_XXS";
7031
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_S"] = 19] = "IQ1_S";
7032
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_NL"] = 20] = "IQ4_NL";
7033
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ3_S"] = 21] = "IQ3_S";
7034
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ2_S"] = 22] = "IQ2_S";
7035
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ4_XS"] = 23] = "IQ4_XS";
7036
+ GGMLQuantizationType2[GGMLQuantizationType2["I8"] = 24] = "I8";
7037
+ GGMLQuantizationType2[GGMLQuantizationType2["I16"] = 25] = "I16";
7038
+ GGMLQuantizationType2[GGMLQuantizationType2["I32"] = 26] = "I32";
7039
+ GGMLQuantizationType2[GGMLQuantizationType2["I64"] = 27] = "I64";
7040
+ GGMLQuantizationType2[GGMLQuantizationType2["F64"] = 28] = "F64";
7041
+ GGMLQuantizationType2[GGMLQuantizationType2["IQ1_M"] = 29] = "IQ1_M";
7042
+ GGMLQuantizationType2[GGMLQuantizationType2["BF16"] = 30] = "BF16";
7043
+ return GGMLQuantizationType2;
7044
+ })(GGMLQuantizationType || {});
7045
+ var ggufQuants = Object.values(GGMLQuantizationType).filter((v) => typeof v === "string");
7046
+ var GGUF_QUANT_RE = new RegExp(`(?<quant>${ggufQuants.join("|")})(_(?<sizeVariation>[A-Z]+))?`);
7047
+ var GGUF_QUANT_RE_GLOBAL = new RegExp(GGUF_QUANT_RE, "g");
7048
+ function parseGGUFQuantLabel(fname) {
7049
+ const quantLabel = fname.toUpperCase().match(GGUF_QUANT_RE_GLOBAL)?.at(-1);
7050
+ return quantLabel;
7051
+ }
7052
+
7006
7053
  // src/hardware.ts
7007
7054
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL = 10 ** 14;
7008
7055
  var TFLOPS_THRESHOLD_WHITE_HOUSE_MODEL_TRAINING_TOTAL_BIOLOGY = 10 ** 11;
@@ -7461,7 +7508,6 @@ var SKUS = {
7461
7508
  };
7462
7509
 
7463
7510
  // src/local-apps.ts
7464
- import { parseGGUFQuantLabel } from "@huggingface/gguf";
7465
7511
  function isAwqModel(model) {
7466
7512
  return model.config?.quantization_config?.quant_method === "awq";
7467
7513
  }
@@ -7842,6 +7888,9 @@ export {
7842
7888
  ALL_MODEL_LIBRARY_KEYS,
7843
7889
  DATASET_LIBRARIES_UI_ELEMENTS,
7844
7890
  DEFAULT_MEMORY_OPTIONS,
7891
+ GGMLQuantizationType,
7892
+ GGUF_QUANT_RE,
7893
+ GGUF_QUANT_RE_GLOBAL,
7845
7894
  LIBRARY_TASK_MAPPING,
7846
7895
  LOCAL_APPS,
7847
7896
  MAPPING_DEFAULT_WIDGET,
@@ -7856,5 +7905,6 @@ export {
7856
7905
  SUBTASK_TYPES,
7857
7906
  TASKS_DATA,
7858
7907
  TASKS_MODEL_LIBRARIES,
7908
+ parseGGUFQuantLabel,
7859
7909
  snippets_exports as snippets
7860
7910
  };
@@ -0,0 +1,35 @@
1
+ export declare enum GGMLQuantizationType {
2
+ F32 = 0,
3
+ F16 = 1,
4
+ Q4_0 = 2,
5
+ Q4_1 = 3,
6
+ Q5_0 = 6,
7
+ Q5_1 = 7,
8
+ Q8_0 = 8,
9
+ Q8_1 = 9,
10
+ Q2_K = 10,
11
+ Q3_K = 11,
12
+ Q4_K = 12,
13
+ Q5_K = 13,
14
+ Q6_K = 14,
15
+ Q8_K = 15,
16
+ IQ2_XXS = 16,
17
+ IQ2_XS = 17,
18
+ IQ3_XXS = 18,
19
+ IQ1_S = 19,
20
+ IQ4_NL = 20,
21
+ IQ3_S = 21,
22
+ IQ2_S = 22,
23
+ IQ4_XS = 23,
24
+ I8 = 24,
25
+ I16 = 25,
26
+ I32 = 26,
27
+ I64 = 27,
28
+ F64 = 28,
29
+ IQ1_M = 29,
30
+ BF16 = 30
31
+ }
32
+ export declare const GGUF_QUANT_RE: RegExp;
33
+ export declare const GGUF_QUANT_RE_GLOBAL: RegExp;
34
+ export declare function parseGGUFQuantLabel(fname: string): string | undefined;
35
+ //# sourceMappingURL=gguf.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"gguf.d.ts","sourceRoot":"","sources":["../../src/gguf.ts"],"names":[],"mappings":"AAAA,oBAAY,oBAAoB;IAC/B,GAAG,IAAI;IACP,GAAG,IAAI;IACP,IAAI,IAAI;IACR,IAAI,IAAI;IACR,IAAI,IAAI;IACR,IAAI,IAAI;IACR,IAAI,IAAI;IACR,IAAI,IAAI;IACR,IAAI,KAAK;IACT,IAAI,KAAK;IACT,IAAI,KAAK;IACT,IAAI,KAAK;IACT,IAAI,KAAK;IACT,IAAI,KAAK;IACT,OAAO,KAAK;IACZ,MAAM,KAAK;IACX,OAAO,KAAK;IACZ,KAAK,KAAK;IACV,MAAM,KAAK;IACX,KAAK,KAAK;IACV,KAAK,KAAK;IACV,MAAM,KAAK;IACX,EAAE,KAAK;IACP,GAAG,KAAK;IACR,GAAG,KAAK;IACR,GAAG,KAAK;IACR,GAAG,KAAK;IACR,KAAK,KAAK;IACV,IAAI,KAAK;CACT;AAGD,eAAO,MAAM,aAAa,QAAmF,CAAC;AAC9G,eAAO,MAAM,oBAAoB,QAAiC,CAAC;AAEnE,wBAAgB,mBAAmB,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAGrE"}