@huggingface/tasks 0.12.26 → 0.12.28

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
package/dist/index.cjs CHANGED
@@ -795,7 +795,7 @@ var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([
  ]);

  // src/pipelines.ts
- var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ var MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
  var MODALITY_LABELS = {
  multimodal: "Multimodal",
  nlp: "Natural Language Processing",
@@ -5797,6 +5797,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/abetlen/llama-cpp-python",
  snippets: llama_cpp_python
  },
+ "mini-omni2": {
+ prettyLabel: "Mini-Omni2",
+ repoName: "Mini-Omni2",
+ repoUrl: "https://github.com/gpt-omni/mini-omni2",
+ countDownloads: `path:"model_config.yaml"`
+ },
  mindspore: {
  prettyLabel: "MindSpore",
  repoName: "mindspore",
@@ -6250,7 +6256,28 @@ var inputsQuestionAnswering = () => `{
  }`;
  var inputsTextClassification = () => `"I like you. I love you"`;
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
- var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+ var inputsTextGeneration = (model) => {
+ if (model.tags.includes("conversational")) {
+ return model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence."
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ];
+ }
+ return `"Can you please let us know more details about your "`;
+ };
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
  var inputsSentenceSimilarity = () => `{
@@ -6307,13 +6334,15 @@ function getModelInputSnippet(model, noWrap = false, noQuotes = false) {
  const inputs = modelInputSnippets[model.pipeline_tag];
  if (inputs) {
  let result = inputs(model);
- if (noWrap) {
- result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
- }
- if (noQuotes) {
- const REGEX_QUOTES = /^"(.+)"$/s;
- const match = result.match(REGEX_QUOTES);
- result = match ? match[1] : result;
+ if (typeof result === "string") {
+ if (noWrap) {
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+ }
+ if (noQuotes) {
+ const REGEX_QUOTES = /^"(.+)"$/s;
+ const match = result.match(REGEX_QUOTES);
+ result = match ? match[1] : result;
+ }
  }
  return result;
  }
@@ -6329,7 +6358,6 @@ __export(curl_exports, {
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
  snippetBasic: () => snippetBasic,
  snippetFile: () => snippetFile,
- snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
  snippetTextGeneration: () => snippetTextGeneration,
  snippetZeroShotClassification: () => snippetZeroShotClassification
  });
@@ -6377,9 +6405,8 @@ var snippetBasic = (model, accessToken) => ({
  var snippetTextGeneration = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
@@ -6414,32 +6441,6 @@ var snippetTextGeneration = (model, accessToken, opts) => {
  return snippetBasic(model, accessToken);
  }
  };
- var snippetImageTextToTextGeneration = (model, accessToken) => {
- if (model.tags.includes("conversational")) {
- return {
- content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
- -H 'Content-Type: application/json' \\
- -d '{
- "model": "${model.id}",
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
- {"type": "text", "text": "Describe this image in one sentence."}
- ]
- }
- ],
- "max_tokens": 500,
- "stream": false
- }'
- `
- };
- } else {
- return snippetBasic(model, accessToken);
- }
- };
  var snippetZeroShotClassification = (model, accessToken) => ({
  content: `curl https://api-inference.huggingface.co/models/${model.id} \\
  -X POST \\
@@ -6464,7 +6465,7 @@ var curlSnippets = {
  summarization: snippetBasic,
  "feature-extraction": snippetBasic,
  "text-generation": snippetTextGeneration,
- "image-text-to-text": snippetImageTextToTextGeneration,
+ "image-text-to-text": snippetTextGeneration,
  "text2text-generation": snippetBasic,
  "fill-mask": snippetBasic,
  "sentence-similarity": snippetBasic,
@@ -6494,7 +6495,6 @@ __export(python_exports, {
  pythonSnippets: () => pythonSnippets,
  snippetBasic: () => snippetBasic2,
  snippetConversational: () => snippetConversational,
- snippetConversationalWithImage: () => snippetConversationalWithImage,
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
  snippetFile: () => snippetFile2,
  snippetTabular: () => snippetTabular,
@@ -6505,9 +6505,8 @@ __export(python_exports, {
  });
  var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, {
  sep: ",\n ",
  start: `[
@@ -6609,29 +6608,6 @@ print(completion.choices[0].message)`
  ];
  }
  };
- var snippetConversationalWithImage = (model, accessToken) => ({
- content: `from huggingface_hub import InferenceClient
-
- client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
- image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
- for message in client.chat_completion(
- model="${model.id}",
- messages=[
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": image_url}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens=500,
- stream=True,
- ):
- print(message.choices[0].delta.content, end="")`
- });
  var snippetZeroShotClassification2 = (model) => ({
  content: `def query(payload):
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -6749,7 +6725,7 @@ var pythonSnippets = {
  "feature-extraction": snippetBasic2,
  "text-generation": snippetBasic2,
  "text2text-generation": snippetBasic2,
- "image-text-to-text": snippetConversationalWithImage,
+ "image-text-to-text": snippetConversational,
  "fill-mask": snippetBasic2,
  "sentence-similarity": snippetBasic2,
  "automatic-speech-recognition": snippetFile2,
@@ -6768,10 +6744,8 @@ var pythonSnippets = {
  "zero-shot-image-classification": snippetZeroShotImageClassification
  };
  function getPythonInferenceSnippet(model, accessToken, opts) {
- if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
+ if (model.tags.includes("conversational")) {
  return snippetConversational(model, accessToken, opts);
- } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
- return snippetConversationalWithImage(model, accessToken);
  } else {
  let snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
  snippets = Array.isArray(snippets) ? snippets : [snippets];
@@ -6800,7 +6774,6 @@ __export(js_exports, {
  jsSnippets: () => jsSnippets,
  snippetBasic: () => snippetBasic3,
  snippetFile: () => snippetFile3,
- snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
  snippetTextGeneration: () => snippetTextGeneration2,
  snippetTextToAudio: () => snippetTextToAudio2,
  snippetTextToImage: () => snippetTextToImage2,
@@ -6830,9 +6803,8 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  var snippetTextGeneration2 = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6935,34 +6907,6 @@ console.log(chatCompletion.choices[0].message);`
  return snippetBasic3(model, accessToken);
  }
  };
- var snippetImageTextToTextGeneration2 = (model, accessToken) => {
- if (model.tags.includes("conversational")) {
- return {
- content: `import { HfInference } from "@huggingface/inference";
-
- const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
- const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
- for await (const chunk of inference.chatCompletionStream({
- model: "${model.id}",
- messages: [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": imageUrl}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens: 500,
- })) {
- process.stdout.write(chunk.choices[0]?.delta?.content || "");
- }`
- };
- } else {
- return snippetBasic3(model, accessToken);
- }
- };
  var snippetZeroShotClassification3 = (model, accessToken) => ({
  content: `async function query(data) {
  const response = await fetch(
@@ -7075,7 +7019,7 @@ var jsSnippets = {
  summarization: snippetBasic3,
  "feature-extraction": snippetBasic3,
  "text-generation": snippetTextGeneration2,
- "image-text-to-text": snippetImageTextToTextGeneration2,
+ "image-text-to-text": snippetTextGeneration2,
  "text2text-generation": snippetBasic3,
  "fill-mask": snippetBasic3,
  "sentence-similarity": snippetBasic3,
package/dist/index.js CHANGED
@@ -757,7 +757,7 @@ var MAPPING_DEFAULT_WIDGET = /* @__PURE__ */ new Map([
  ]);

  // src/pipelines.ts
- var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ var MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
  var MODALITY_LABELS = {
  multimodal: "Multimodal",
  nlp: "Natural Language Processing",
@@ -5759,6 +5759,12 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/abetlen/llama-cpp-python",
  snippets: llama_cpp_python
  },
+ "mini-omni2": {
+ prettyLabel: "Mini-Omni2",
+ repoName: "Mini-Omni2",
+ repoUrl: "https://github.com/gpt-omni/mini-omni2",
+ countDownloads: `path:"model_config.yaml"`
+ },
  mindspore: {
  prettyLabel: "MindSpore",
  repoName: "mindspore",
@@ -6212,7 +6218,28 @@ var inputsQuestionAnswering = () => `{
  }`;
  var inputsTextClassification = () => `"I like you. I love you"`;
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
- var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+ var inputsTextGeneration = (model) => {
+ if (model.tags.includes("conversational")) {
+ return model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence."
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+ }
+ }
+ ]
+ }
+ ];
+ }
+ return `"Can you please let us know more details about your "`;
+ };
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
  var inputsSentenceSimilarity = () => `{
@@ -6269,13 +6296,15 @@ function getModelInputSnippet(model, noWrap = false, noQuotes = false) {
  const inputs = modelInputSnippets[model.pipeline_tag];
  if (inputs) {
  let result = inputs(model);
- if (noWrap) {
- result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
- }
- if (noQuotes) {
- const REGEX_QUOTES = /^"(.+)"$/s;
- const match = result.match(REGEX_QUOTES);
- result = match ? match[1] : result;
+ if (typeof result === "string") {
+ if (noWrap) {
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+ }
+ if (noQuotes) {
+ const REGEX_QUOTES = /^"(.+)"$/s;
+ const match = result.match(REGEX_QUOTES);
+ result = match ? match[1] : result;
+ }
  }
  return result;
  }
@@ -6291,7 +6320,6 @@ __export(curl_exports, {
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
  snippetBasic: () => snippetBasic,
  snippetFile: () => snippetFile,
- snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
  snippetTextGeneration: () => snippetTextGeneration,
  snippetZeroShotClassification: () => snippetZeroShotClassification
  });
@@ -6339,9 +6367,8 @@ var snippetBasic = (model, accessToken) => ({
  var snippetTextGeneration = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
  max_tokens: opts?.max_tokens ?? 500,
@@ -6376,32 +6403,6 @@ var snippetTextGeneration = (model, accessToken, opts) => {
  return snippetBasic(model, accessToken);
  }
  };
- var snippetImageTextToTextGeneration = (model, accessToken) => {
- if (model.tags.includes("conversational")) {
- return {
- content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
- -H 'Content-Type: application/json' \\
- -d '{
- "model": "${model.id}",
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
- {"type": "text", "text": "Describe this image in one sentence."}
- ]
- }
- ],
- "max_tokens": 500,
- "stream": false
- }'
- `
- };
- } else {
- return snippetBasic(model, accessToken);
- }
- };
  var snippetZeroShotClassification = (model, accessToken) => ({
  content: `curl https://api-inference.huggingface.co/models/${model.id} \\
  -X POST \\
@@ -6426,7 +6427,7 @@ var curlSnippets = {
  summarization: snippetBasic,
  "feature-extraction": snippetBasic,
  "text-generation": snippetTextGeneration,
- "image-text-to-text": snippetImageTextToTextGeneration,
+ "image-text-to-text": snippetTextGeneration,
  "text2text-generation": snippetBasic,
  "fill-mask": snippetBasic,
  "sentence-similarity": snippetBasic,
@@ -6456,7 +6457,6 @@ __export(python_exports, {
  pythonSnippets: () => pythonSnippets,
  snippetBasic: () => snippetBasic2,
  snippetConversational: () => snippetConversational,
- snippetConversationalWithImage: () => snippetConversationalWithImage,
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
  snippetFile: () => snippetFile2,
  snippetTabular: () => snippetTabular,
@@ -6467,9 +6467,8 @@ __export(python_exports, {
  });
  var snippetConversational = (model, accessToken, opts) => {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, {
  sep: ",\n ",
  start: `[
@@ -6571,29 +6570,6 @@ print(completion.choices[0].message)`
  ];
  }
  };
- var snippetConversationalWithImage = (model, accessToken) => ({
- content: `from huggingface_hub import InferenceClient
-
- client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
- image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
- for message in client.chat_completion(
- model="${model.id}",
- messages=[
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": image_url}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens=500,
- stream=True,
- ):
- print(message.choices[0].delta.content, end="")`
- });
  var snippetZeroShotClassification2 = (model) => ({
  content: `def query(payload):
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -6711,7 +6687,7 @@ var pythonSnippets = {
  "feature-extraction": snippetBasic2,
  "text-generation": snippetBasic2,
  "text2text-generation": snippetBasic2,
- "image-text-to-text": snippetConversationalWithImage,
+ "image-text-to-text": snippetConversational,
  "fill-mask": snippetBasic2,
  "sentence-similarity": snippetBasic2,
  "automatic-speech-recognition": snippetFile2,
@@ -6730,10 +6706,8 @@ var pythonSnippets = {
  "zero-shot-image-classification": snippetZeroShotImageClassification
  };
  function getPythonInferenceSnippet(model, accessToken, opts) {
- if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
+ if (model.tags.includes("conversational")) {
  return snippetConversational(model, accessToken, opts);
- } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
- return snippetConversationalWithImage(model, accessToken);
  } else {
  let snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
  snippets = Array.isArray(snippets) ? snippets : [snippets];
@@ -6762,7 +6736,6 @@ __export(js_exports, {
  jsSnippets: () => jsSnippets,
  snippetBasic: () => snippetBasic3,
  snippetFile: () => snippetFile3,
- snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
  snippetTextGeneration: () => snippetTextGeneration2,
  snippetTextToAudio: () => snippetTextToAudio2,
  snippetTextToImage: () => snippetTextToImage2,
@@ -6792,9 +6765,8 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
  var snippetTextGeneration2 = (model, accessToken, opts) => {
  if (model.tags.includes("conversational")) {
  const streaming = opts?.streaming ?? true;
- const messages = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" }
- ];
+ const exampleMessages = getModelInputSnippet(model);
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
  const config = {
  ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6897,34 +6869,6 @@ console.log(chatCompletion.choices[0].message);`
  return snippetBasic3(model, accessToken);
  }
  };
- var snippetImageTextToTextGeneration2 = (model, accessToken) => {
- if (model.tags.includes("conversational")) {
- return {
- content: `import { HfInference } from "@huggingface/inference";
-
- const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
- const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
- for await (const chunk of inference.chatCompletionStream({
- model: "${model.id}",
- messages: [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": imageUrl}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens: 500,
- })) {
- process.stdout.write(chunk.choices[0]?.delta?.content || "");
- }`
- };
- } else {
- return snippetBasic3(model, accessToken);
- }
- };
  var snippetZeroShotClassification3 = (model, accessToken) => ({
  content: `async function query(data) {
  const response = await fetch(
@@ -7037,7 +6981,7 @@ var jsSnippets = {
  summarization: snippetBasic3,
  "feature-extraction": snippetBasic3,
  "text-generation": snippetTextGeneration2,
- "image-text-to-text": snippetImageTextToTextGeneration2,
+ "image-text-to-text": snippetTextGeneration2,
  "text2text-generation": snippetBasic3,
  "fill-mask": snippetBasic3,
  "sentence-similarity": snippetBasic3,
package/dist/model-libraries.d.ts CHANGED
@@ -367,6 +367,12 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  repoUrl: string;
  snippets: (model: ModelData) => string[];
  };
+ "mini-omni2": {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ countDownloads: string;
+ };
  mindspore: {
  prettyLabel: string;
  repoName: string;
@@ -766,6 +772,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  };
  };
  export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
- export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
- export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+ export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
+ export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
  //# sourceMappingURL=model-libraries.d.ts.map
package/dist/model-libraries.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAysBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,yyCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,yyCAQ1B,CAAC"}
+ {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA+sBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,wzCAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,wzCAQ1B,CAAC"}
package/dist/pipelines.d.ts CHANGED
@@ -1,4 +1,4 @@
- export declare const MODALITIES: readonly ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ export declare const MODALITIES: readonly ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
  export type Modality = (typeof MODALITIES)[number];
  export declare const MODALITY_LABELS: {
  multimodal: string;
package/dist/snippets/curl.d.ts CHANGED
@@ -9,7 +9,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
  max_tokens?: GenerationParameters["max_tokens"];
  top_p?: GenerationParameters["top_p"];
  }) => InferenceSnippet;
- export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
package/dist/snippets/curl.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAuCF,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBA0B/F,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAsCF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
package/dist/snippets/inputs.d.ts CHANGED
@@ -1,3 +1,4 @@
+ import type { ChatCompletionInputMessage } from "../tasks";
  import type { ModelDataMinimal } from "./types";
- export declare function getModelInputSnippet(model: ModelDataMinimal, noWrap?: boolean, noQuotes?: boolean): string;
+ export declare function getModelInputSnippet(model: ModelDataMinimal, noWrap?: boolean, noQuotes?: boolean): string | ChatCompletionInputMessage[];
  //# sourceMappingURL=inputs.d.ts.map
package/dist/snippets/inputs.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAqHhD,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,gBAAgB,EAAE,MAAM,UAAQ,EAAE,QAAQ,UAAQ,GAAG,MAAM,CAiBtG"}
+ {"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,0BAA0B,EAAE,MAAM,UAAU,CAAC;AAC3D,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AA4IhD,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,MAAM,UAAQ,EACd,QAAQ,UAAQ,GACd,MAAM,GAAG,0BAA0B,EAAE,CAmBvC"}
package/dist/snippets/js.d.ts CHANGED
@@ -9,7 +9,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
  max_tokens?: GenerationParameters["max_tokens"];
  top_p?: GenerationParameters["top_p"];
  }) => InferenceSnippet | InferenceSnippet[];
- export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
package/dist/snippets/js.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA8GrC,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBA4B/F,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA6GrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
package/dist/snippets/python.d.ts CHANGED
@@ -8,7 +8,6 @@ export declare const snippetConversational: (model: ModelDataMinimal, accessToke
  max_tokens?: GenerationParameters["max_tokens"];
  top_p?: GenerationParameters["top_p"];
  }) => InferenceSnippet[];
- export declare const snippetConversationalWithImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => InferenceSnippet;
  export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet;
  export declare const snippetBasic: (model: ModelDataMinimal) => InferenceSnippet;
package/dist/snippets/python.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAyGlB,CAAC;AAEF,eAAO,MAAM,8BAA8B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB5F,CAAC;AAEH,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAStE,CAAC;AAEH,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAe3E,CAAC;AAEH,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,gBAQrD,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAQpD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAW3D,CAAC;AAEH,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAOvD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBA+B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAWzE,CAAC;AAEH,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CA2BvC;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAwGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAStE,CAAC;AAEH,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAe3E,CAAC;AAEH,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,gBAQrD,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAQpD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAW3D,CAAC;AAEH,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAOvD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBA+B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAWzE,CAAC;AAEH,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CAwBvC;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
package/dist/tasks/image-segmentation/data.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-segmentation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-segmentation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA8Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"}
package/dist/tasks/object-detection/data.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAkFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAiFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
package/dist/tasks/text-to-speech/data.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyEf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA0Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.12.26",
+ "version": "0.12.28",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
package/src/model-libraries.ts CHANGED
@@ -373,6 +373,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
  repoUrl: "https://github.com/abetlen/llama-cpp-python",
  snippets: snippets.llama_cpp_python,
  },
+ "mini-omni2": {
+ prettyLabel: "Mini-Omni2",
+ repoName: "Mini-Omni2",
+ repoUrl: "https://github.com/gpt-omni/mini-omni2",
+ countDownloads: `path:"model_config.yaml"`,
+ },
  mindspore: {
  prettyLabel: "MindSpore",
  repoName: "mindspore",
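
Note: `countDownloads` in the new entry is a Hub query filter: for libraries like Mini-Omni2 whose repos ship no config.json, downloads are counted on fetches of the named file instead. For illustration, a hypothetical entry following the same shape (every value below is invented):

    // Sketch of a registry entry, per the library-entry interface
    // declared at the top of src/model-libraries.ts.
    const hypotheticalEntry = {
      prettyLabel: "My Library", // label shown on hf.co model pages
      repoName: "my-library",
      repoUrl: "https://github.com/my-org/my-library",
      countDownloads: `path:"weights.ckpt"`, // count downloads via this file
    };
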
package/src/pipelines.ts CHANGED
@@ -1,4 +1,4 @@
- export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;
+ export const MODALITIES = ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"] as const;

  export type Modality = (typeof MODALITIES)[number];

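Note: the reorder does not change the `Modality` union derived from the tuple; it only changes iteration order, so UIs that render task groups in `MODALITIES` order now list Multimodal first. A small sketch:

    import { MODALITIES, MODALITY_LABELS } from "@huggingface/tasks";

    // Prints "Multimodal" first under the new ordering.
    for (const modality of MODALITIES) {
      console.log(MODALITY_LABELS[modality]);
    }
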
package/src/snippets/curl.ts CHANGED
@@ -26,9 +26,8 @@ export const snippetTextGeneration = (
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
- const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" },
- ];
+ const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
+ const messages = opts?.messages ?? exampleMessages;

  const config = {
  ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -63,34 +62,6 @@ export const snippetTextGeneration = (
  }
  };

- export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
- if (model.tags.includes("conversational")) {
- // Conversational model detected, so we display a code snippet that features the Messages API
- return {
- content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
- -H 'Content-Type: application/json' \\
- -d '{
- "model": "${model.id}",
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
- {"type": "text", "text": "Describe this image in one sentence."}
- ]
- }
- ],
- "max_tokens": 500,
- "stream": false
- }'
- `,
- };
- } else {
- return snippetBasic(model, accessToken);
- }
- };
-
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
  content: `curl https://api-inference.huggingface.co/models/${model.id} \\
  -X POST \\
@@ -122,7 +93,7 @@ export const curlSnippets: Partial<
  summarization: snippetBasic,
  "feature-extraction": snippetBasic,
  "text-generation": snippetTextGeneration,
- "image-text-to-text": snippetImageTextToTextGeneration,
+ "image-text-to-text": snippetTextGeneration,
  "text2text-generation": snippetBasic,
  "fill-mask": snippetBasic,
  "sentence-similarity": snippetBasic,
package/src/snippets/inputs.ts CHANGED
@@ -1,4 +1,5 @@
  import type { PipelineType } from "../pipelines";
+ import type { ChatCompletionInputMessage } from "../tasks";
  import type { ModelDataMinimal } from "./types";

  const inputsZeroShotClassification = () =>
@@ -40,7 +41,30 @@ const inputsTextClassification = () => `"I like you. I love you"`;

  const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;

- const inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+ const inputsTextGeneration = (model: ModelDataMinimal): string | ChatCompletionInputMessage[] => {
+ if (model.tags.includes("conversational")) {
+ return model.pipeline_tag === "text-generation"
+ ? [{ role: "user", content: "What is the capital of France?" }]
+ : [
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Describe this image in one sentence.",
+ },
+ {
+ type: "image_url",
+ image_url: {
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+ },
+ },
+ ],
+ },
+ ];
+ }
+ return `"Can you please let us know more details about your "`;
+ };

  const inputsText2TextGeneration = () => `"The answer to the universe is"`;

@@ -84,7 +108,7 @@ const inputsTabularPrediction = () =>
  const inputsZeroShotImageClassification = () => `"cats.jpg"`;

  const modelInputSnippets: {
- [key in PipelineType]?: (model: ModelDataMinimal) => string;
+ [key in PipelineType]?: (model: ModelDataMinimal) => string | ChatCompletionInputMessage[];
  } = {
  "audio-to-audio": inputsAudioToAudio,
  "audio-classification": inputsAudioClassification,
@@ -116,18 +140,24 @@ const modelInputSnippets: {

  // Use noWrap to put the whole snippet on a single line (removing new lines and tabulations)
  // Use noQuotes to strip quotes from start & end (example: "abc" -> abc)
- export function getModelInputSnippet(model: ModelDataMinimal, noWrap = false, noQuotes = false): string {
+ export function getModelInputSnippet(
+ model: ModelDataMinimal,
+ noWrap = false,
+ noQuotes = false
+ ): string | ChatCompletionInputMessage[] {
  if (model.pipeline_tag) {
  const inputs = modelInputSnippets[model.pipeline_tag];
  if (inputs) {
  let result = inputs(model);
- if (noWrap) {
- result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
- }
- if (noQuotes) {
- const REGEX_QUOTES = /^"(.+)"$/s;
- const match = result.match(REGEX_QUOTES);
- result = match ? match[1] : result;
+ if (typeof result === "string") {
+ if (noWrap) {
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+ }
+ if (noQuotes) {
+ const REGEX_QUOTES = /^"(.+)"$/s;
+ const match = result.match(REGEX_QUOTES);
+ result = match ? match[1] : result;
+ }
  }
  return result;
  }
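
Note: because the return type is now `string | ChatCompletionInputMessage[]`, callers must narrow before applying string-only transforms, which is exactly what the new `typeof result === "string"` guard does. A sketch of the calling pattern (hypothetical helper, imports matching this file's layout):

    import type { ChatCompletionInputMessage } from "../tasks";
    import type { ModelDataMinimal } from "./types";
    import { getModelInputSnippet } from "./inputs";

    // Hypothetical helper: embed a model's example input in a snippet body.
    function renderExampleInput(model: ModelDataMinimal): string {
      const input = getModelInputSnippet(model);
      if (typeof input === "string") {
        return input; // plain payload string, embeddable as before
      }
      const messages: ChatCompletionInputMessage[] = input; // narrowed to messages
      return JSON.stringify(messages, null, 2); // serialize for a chat payload
    }
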
package/src/snippets/js.ts CHANGED
@@ -40,9 +40,8 @@ export const snippetTextGeneration = (
  if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
  const streaming = opts?.streaming ?? true;
- const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" },
- ];
+ const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });

  const config = {
@@ -148,36 +147,6 @@ console.log(chatCompletion.choices[0].message);`,
  }
  };

- export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
- if (model.tags.includes("conversational")) {
- // Conversational model detected, so we display a code snippet that features the Messages API
- return {
- content: `import { HfInference } from "@huggingface/inference";
-
- const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
- const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
- for await (const chunk of inference.chatCompletionStream({
- model: "${model.id}",
- messages: [
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": imageUrl}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens: 500,
- })) {
- process.stdout.write(chunk.choices[0]?.delta?.content || "");
- }`,
- };
- } else {
- return snippetBasic(model, accessToken);
- }
- };
-
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
  content: `async function query(data) {
  const response = await fetch(
@@ -307,7 +276,7 @@ export const jsSnippets: Partial<
  summarization: snippetBasic,
  "feature-extraction": snippetBasic,
  "text-generation": snippetTextGeneration,
- "image-text-to-text": snippetImageTextToTextGeneration,
+ "image-text-to-text": snippetTextGeneration,
  "text2text-generation": snippetBasic,
  "fill-mask": snippetBasic,
  "sentence-similarity": snippetBasic,
package/src/snippets/python.ts CHANGED
@@ -16,9 +16,8 @@ export const snippetConversational = (
  }
  ): InferenceSnippet[] => {
  const streaming = opts?.streaming ?? true;
- const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
- { role: "user", content: "What is the capital of France?" },
- ];
+ const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
+ const messages = opts?.messages ?? exampleMessages;
  const messagesStr = stringifyMessages(messages, {
  sep: ",\n\t",
  start: `[\n\t`,
@@ -121,30 +120,6 @@ print(completion.choices[0].message)`,
  }
  };

- export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
- content: `from huggingface_hub import InferenceClient
-
- client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
- image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
- for message in client.chat_completion(
- model="${model.id}",
- messages=[
- {
- "role": "user",
- "content": [
- {"type": "image_url", "image_url": {"url": image_url}},
- {"type": "text", "text": "Describe this image in one sentence."},
- ],
- }
- ],
- max_tokens=500,
- stream=True,
- ):
- print(message.choices[0].delta.content, end="")`,
- });
-
  export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet => ({
  content: `def query(payload):
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -282,7 +257,7 @@ export const pythonSnippets: Partial<
  "feature-extraction": snippetBasic,
  "text-generation": snippetBasic,
  "text2text-generation": snippetBasic,
- "image-text-to-text": snippetConversationalWithImage,
+ "image-text-to-text": snippetConversational,
  "fill-mask": snippetBasic,
  "sentence-similarity": snippetBasic,
  "automatic-speech-recognition": snippetFile,
@@ -306,12 +281,9 @@ export function getPythonInferenceSnippet(
  accessToken: string,
  opts?: Record<string, unknown>
  ): InferenceSnippet | InferenceSnippet[] {
- if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
+ if (model.tags.includes("conversational")) {
  // Conversational model detected, so we display a code snippet that features the Messages API
  return snippetConversational(model, accessToken, opts);
- } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
- // Example sending an image to the Message API
- return snippetConversationalWithImage(model, accessToken);
  } else {
  let snippets =
  model.pipeline_tag && model.pipeline_tag in pythonSnippets
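
Note: with the pipeline-specific branches collapsed, any model tagged `conversational`, chat LLM or chat VLM alike, now takes the `snippetConversational` path; the message shape (plain text vs. text plus image_url) is decided inside `getModelInputSnippet` from `pipeline_tag`. A sketch of the simplified dispatch (illustrative ids and token):

    import { getPythonInferenceSnippet } from "./python";

    // Both calls below route through snippetConversational; only the
    // example messages differ, keyed on pipeline_tag in inputs.ts.
    const llmSnippets = getPythonInferenceSnippet(
      { id: "my-org/my-llm", pipeline_tag: "text-generation", tags: ["conversational"] },
      "hf_xxx"
    );
    const vlmSnippets = getPythonInferenceSnippet(
      { id: "my-org/my-vlm", pipeline_tag: "image-text-to-text", tags: ["conversational"] },
      "hf_xxx"
    );
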
package/src/tasks/image-segmentation/data.ts CHANGED
@@ -44,8 +44,7 @@ const taskData: TaskDataCustom = {
  models: [
  {
  // TO DO: write description
- description:
- "Solid semantic segmentation model trained on ADE20k.",
+ description: "Solid semantic segmentation model trained on ADE20k.",
  id: "openmmlab/upernet-convnext-small",
  },
  {
package/src/tasks/object-detection/data.ts CHANGED
@@ -51,8 +51,7 @@ const taskData: TaskDataCustom = {
  id: "jameslahm/yolov10x",
  },
  {
- description:
- "Fast and accurate object detection model trained on COCO and Object365 datasets.",
+ description: "Fast and accurate object detection model trained on COCO and Object365 datasets.",
  id: "PekingU/rtdetr_r18vd_coco_o365",
  },
  ],
package/src/tasks/text-to-speech/data.ts CHANGED
@@ -57,7 +57,8 @@ const taskData: TaskDataCustom = {
  id: "suno/bark",
  },
  {
- description: "An application on XTTS, a voice generation model that lets you clone voices into different languages.",
+ description:
+ "An application on XTTS, a voice generation model that lets you clone voices into different languages.",
  id: "coqui/xtts",
  },
  {