@huggingface/tasks 0.10.20 → 0.10.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -5246,39 +5246,39 @@ var inputsZeroShotClassification = () => `"Hi, I recently bought a device from y
5246
5246
  var inputsTranslation = () => `"\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435"`;
5247
5247
  var inputsSummarization = () => `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;
5248
5248
  var inputsTableQuestionAnswering = () => `{
5249
- "query": "How many stars does the transformers repository have?",
5250
- "table": {
5251
- "Repository": ["Transformers", "Datasets", "Tokenizers"],
5252
- "Stars": ["36542", "4512", "3934"],
5253
- "Contributors": ["651", "77", "34"],
5254
- "Programming language": [
5255
- "Python",
5256
- "Python",
5257
- "Rust, Python and NodeJS"
5258
- ]
5259
- }
5260
- }`;
5249
+ "query": "How many stars does the transformers repository have?",
5250
+ "table": {
5251
+ "Repository": ["Transformers", "Datasets", "Tokenizers"],
5252
+ "Stars": ["36542", "4512", "3934"],
5253
+ "Contributors": ["651", "77", "34"],
5254
+ "Programming language": [
5255
+ "Python",
5256
+ "Python",
5257
+ "Rust, Python and NodeJS"
5258
+ ]
5259
+ }
5260
+ }`;
5261
5261
  var inputsVisualQuestionAnswering = () => `{
5262
- "image": "cat.png",
5263
- "question": "What is in this image?"
5264
- }`;
5262
+ "image": "cat.png",
5263
+ "question": "What is in this image?"
5264
+ }`;
5265
5265
  var inputsQuestionAnswering = () => `{
5266
- "question": "What is my name?",
5267
- "context": "My name is Clara and I live in Berkeley."
5268
- }`;
5266
+ "question": "What is my name?",
5267
+ "context": "My name is Clara and I live in Berkeley."
5268
+ }`;
5269
5269
  var inputsTextClassification = () => `"I like you. I love you"`;
5270
5270
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
5271
5271
  var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
5272
5272
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
5273
5273
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
5274
5274
  var inputsSentenceSimilarity = () => `{
5275
- "source_sentence": "That is a happy person",
5276
- "sentences": [
5277
- "That is a happy dog",
5278
- "That is a very happy person",
5279
- "Today is a sunny day"
5280
- ]
5281
- }`;
5275
+ "source_sentence": "That is a happy person",
5276
+ "sentences": [
5277
+ "That is a happy dog",
5278
+ "That is a very happy person",
5279
+ "Today is a sunny day"
5280
+ ]
5281
+ }`;
5282
5282
  var inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
5283
5283
  var inputsImageClassification = () => `"cats.jpg"`;
5284
5284
  var inputsImageToText = () => `"cats.jpg"`;
@@ -5347,6 +5347,7 @@ __export(curl_exports, {
5347
5347
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
5348
5348
  snippetBasic: () => snippetBasic,
5349
5349
  snippetFile: () => snippetFile,
5350
+ snippetTextGeneration: () => snippetTextGeneration,
5350
5351
  snippetZeroShotClassification: () => snippetZeroShotClassification
5351
5352
  });
5352
5353
  var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
@@ -5355,6 +5356,22 @@ var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingfa
5355
5356
  -H 'Content-Type: application/json' \\
5356
5357
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
5357
5358
  `;
5359
+ var snippetTextGeneration = (model, accessToken) => {
5360
+ if (model.config?.tokenizer_config?.chat_template) {
5361
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
5362
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
5363
+ -H 'Content-Type: application/json' \\
5364
+ -d '{
5365
+ "model": "${model.id}",
5366
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
5367
+ "max_tokens": 500,
5368
+ "stream": false
5369
+ }'
5370
+ `;
5371
+ } else {
5372
+ return snippetBasic(model, accessToken);
5373
+ }
5374
+ };
5358
5375
  var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
5359
5376
  -X POST \\
5360
5377
  -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
@@ -5376,7 +5393,7 @@ var curlSnippets = {
5376
5393
  translation: snippetBasic,
5377
5394
  summarization: snippetBasic,
5378
5395
  "feature-extraction": snippetBasic,
5379
- "text-generation": snippetBasic,
5396
+ "text-generation": snippetTextGeneration,
5380
5397
  "text2text-generation": snippetBasic,
5381
5398
  "fill-mask": snippetBasic,
5382
5399
  "sentence-similarity": snippetBasic,
@@ -5405,6 +5422,7 @@ __export(python_exports, {
5405
5422
  hasPythonInferenceSnippet: () => hasPythonInferenceSnippet,
5406
5423
  pythonSnippets: () => pythonSnippets,
5407
5424
  snippetBasic: () => snippetBasic2,
5425
+ snippetConversational: () => snippetConversational,
5408
5426
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
5409
5427
  snippetFile: () => snippetFile2,
5410
5428
  snippetTabular: () => snippetTabular,
@@ -5413,6 +5431,20 @@ __export(python_exports, {
5413
5431
  snippetZeroShotClassification: () => snippetZeroShotClassification2,
5414
5432
  snippetZeroShotImageClassification: () => snippetZeroShotImageClassification
5415
5433
  });
5434
+ var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient
5435
+
5436
+ client = InferenceClient(
5437
+ "${model.id}",
5438
+ token="${accessToken || "{API_TOKEN}"}",
5439
+ )
5440
+
5441
+ for message in client.chat_completion(
5442
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
5443
+ max_tokens=500,
5444
+ stream=True,
5445
+ ):
5446
+ print(message.choices[0].delta.content, end="")
5447
+ `;
5416
5448
  var snippetZeroShotClassification2 = (model) => `def query(payload):
5417
5449
  response = requests.post(API_URL, headers=headers, json=payload)
5418
5450
  return response.json()
@@ -5530,13 +5562,17 @@ var pythonSnippets = {
5530
5562
  "zero-shot-image-classification": snippetZeroShotImageClassification
5531
5563
  };
5532
5564
  function getPythonInferenceSnippet(model, accessToken) {
5533
- const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
5534
- return `import requests
5565
+ if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
5566
+ return snippetConversational(model, accessToken);
5567
+ } else {
5568
+ const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : "";
5569
+ return `import requests
5535
5570
 
5536
5571
  API_URL = "https://api-inference.huggingface.co/models/${model.id}"
5537
5572
  headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
5538
5573
 
5539
5574
  ${body}`;
5575
+ }
5540
5576
  }
5541
5577
  function hasPythonInferenceSnippet(model) {
5542
5578
  return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets;
@@ -5550,6 +5586,7 @@ __export(js_exports, {
5550
5586
  jsSnippets: () => jsSnippets,
5551
5587
  snippetBasic: () => snippetBasic3,
5552
5588
  snippetFile: () => snippetFile3,
5589
+ snippetTextGeneration: () => snippetTextGeneration2,
5553
5590
  snippetTextToAudio: () => snippetTextToAudio2,
5554
5591
  snippetTextToImage: () => snippetTextToImage2,
5555
5592
  snippetZeroShotClassification: () => snippetZeroShotClassification3
@@ -5558,7 +5595,10 @@ var snippetBasic3 = (model, accessToken) => `async function query(data) {
5558
5595
  const response = await fetch(
5559
5596
  "https://api-inference.huggingface.co/models/${model.id}",
5560
5597
  {
5561
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5598
+ headers: {
5599
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5600
+ "Content-Type": "application/json",
5601
+ },
5562
5602
  method: "POST",
5563
5603
  body: JSON.stringify(data),
5564
5604
  }
@@ -5570,11 +5610,32 @@ var snippetBasic3 = (model, accessToken) => `async function query(data) {
5570
5610
  query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
5571
5611
  console.log(JSON.stringify(response));
5572
5612
  });`;
5613
+ var snippetTextGeneration2 = (model, accessToken) => {
5614
+ if (model.config?.tokenizer_config?.chat_template) {
5615
+ return `import { HfInference } from "@huggingface/inference";
5616
+
5617
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
5618
+
5619
+ for await (const chunk of inference.chatCompletionStream({
5620
+ model: "${model.id}",
5621
+ messages: [{ role: "user", content: "What is the capital of France?" }],
5622
+ max_tokens: 500,
5623
+ })) {
5624
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
5625
+ }
5626
+ `;
5627
+ } else {
5628
+ return snippetBasic3(model, accessToken);
5629
+ }
5630
+ };
5573
5631
  var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) {
5574
5632
  const response = await fetch(
5575
5633
  "https://api-inference.huggingface.co/models/${model.id}",
5576
5634
  {
5577
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5635
+ headers: {
5636
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5637
+ "Content-Type": "application/json",
5638
+ },
5578
5639
  method: "POST",
5579
5640
  body: JSON.stringify(data),
5580
5641
  }
@@ -5592,7 +5653,10 @@ var snippetTextToImage2 = (model, accessToken) => `async function query(data) {
5592
5653
  const response = await fetch(
5593
5654
  "https://api-inference.huggingface.co/models/${model.id}",
5594
5655
  {
5595
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5656
+ headers: {
5657
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5658
+ "Content-Type": "application/json",
5659
+ },
5596
5660
  method: "POST",
5597
5661
  body: JSON.stringify(data),
5598
5662
  }
@@ -5608,7 +5672,10 @@ var snippetTextToAudio2 = (model, accessToken) => {
5608
5672
  const response = await fetch(
5609
5673
  "https://api-inference.huggingface.co/models/${model.id}",
5610
5674
  {
5611
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5675
+ headers: {
5676
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5677
+ "Content-Type": "application/json",
5678
+ },
5612
5679
  method: "POST",
5613
5680
  body: JSON.stringify(data),
5614
5681
  }
@@ -5637,7 +5704,10 @@ var snippetFile3 = (model, accessToken) => `async function query(filename) {
5637
5704
  const response = await fetch(
5638
5705
  "https://api-inference.huggingface.co/models/${model.id}",
5639
5706
  {
5640
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5707
+ headers: {
5708
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5709
+ "Content-Type": "application/json",
5710
+ },
5641
5711
  method: "POST",
5642
5712
  body: data,
5643
5713
  }
@@ -5659,7 +5729,7 @@ var jsSnippets = {
5659
5729
  translation: snippetBasic3,
5660
5730
  summarization: snippetBasic3,
5661
5731
  "feature-extraction": snippetBasic3,
5662
- "text-generation": snippetBasic3,
5732
+ "text-generation": snippetTextGeneration2,
5663
5733
  "text2text-generation": snippetBasic3,
5664
5734
  "fill-mask": snippetBasic3,
5665
5735
  "sentence-similarity": snippetBasic3,
@@ -6121,7 +6191,7 @@ var LOCAL_APPS = {
6121
6191
  docsUrl: "https://lmstudio.ai",
6122
6192
  mainTask: "text-generation",
6123
6193
  displayOnModelPage: isGgufModel,
6124
- deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}` + filepath ? `&file=${filepath}` : "")
6194
+ deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`)
6125
6195
  },
6126
6196
  jan: {
6127
6197
  prettyLabel: "Jan",
package/dist/index.js CHANGED
@@ -5207,39 +5207,39 @@ var inputsZeroShotClassification = () => `"Hi, I recently bought a device from y
5207
5207
  var inputsTranslation = () => `"\u041C\u0435\u043D\u044F \u0437\u043E\u0432\u0443\u0442 \u0412\u043E\u043B\u044C\u0444\u0433\u0430\u043D\u0433 \u0438 \u044F \u0436\u0438\u0432\u0443 \u0432 \u0411\u0435\u0440\u043B\u0438\u043D\u0435"`;
5208
5208
  var inputsSummarization = () => `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;
5209
5209
  var inputsTableQuestionAnswering = () => `{
5210
- "query": "How many stars does the transformers repository have?",
5211
- "table": {
5212
- "Repository": ["Transformers", "Datasets", "Tokenizers"],
5213
- "Stars": ["36542", "4512", "3934"],
5214
- "Contributors": ["651", "77", "34"],
5215
- "Programming language": [
5216
- "Python",
5217
- "Python",
5218
- "Rust, Python and NodeJS"
5219
- ]
5220
- }
5221
- }`;
5210
+ "query": "How many stars does the transformers repository have?",
5211
+ "table": {
5212
+ "Repository": ["Transformers", "Datasets", "Tokenizers"],
5213
+ "Stars": ["36542", "4512", "3934"],
5214
+ "Contributors": ["651", "77", "34"],
5215
+ "Programming language": [
5216
+ "Python",
5217
+ "Python",
5218
+ "Rust, Python and NodeJS"
5219
+ ]
5220
+ }
5221
+ }`;
5222
5222
  var inputsVisualQuestionAnswering = () => `{
5223
- "image": "cat.png",
5224
- "question": "What is in this image?"
5225
- }`;
5223
+ "image": "cat.png",
5224
+ "question": "What is in this image?"
5225
+ }`;
5226
5226
  var inputsQuestionAnswering = () => `{
5227
- "question": "What is my name?",
5228
- "context": "My name is Clara and I live in Berkeley."
5229
- }`;
5227
+ "question": "What is my name?",
5228
+ "context": "My name is Clara and I live in Berkeley."
5229
+ }`;
5230
5230
  var inputsTextClassification = () => `"I like you. I love you"`;
5231
5231
  var inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
5232
5232
  var inputsTextGeneration = () => `"Can you please let us know more details about your "`;
5233
5233
  var inputsText2TextGeneration = () => `"The answer to the universe is"`;
5234
5234
  var inputsFillMask = (model) => `"The answer to the universe is ${model.mask_token}."`;
5235
5235
  var inputsSentenceSimilarity = () => `{
5236
- "source_sentence": "That is a happy person",
5237
- "sentences": [
5238
- "That is a happy dog",
5239
- "That is a very happy person",
5240
- "Today is a sunny day"
5241
- ]
5242
- }`;
5236
+ "source_sentence": "That is a happy person",
5237
+ "sentences": [
5238
+ "That is a happy dog",
5239
+ "That is a very happy person",
5240
+ "Today is a sunny day"
5241
+ ]
5242
+ }`;
5243
5243
  var inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
5244
5244
  var inputsImageClassification = () => `"cats.jpg"`;
5245
5245
  var inputsImageToText = () => `"cats.jpg"`;
@@ -5308,6 +5308,7 @@ __export(curl_exports, {
5308
5308
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
5309
5309
  snippetBasic: () => snippetBasic,
5310
5310
  snippetFile: () => snippetFile,
5311
+ snippetTextGeneration: () => snippetTextGeneration,
5311
5312
  snippetZeroShotClassification: () => snippetZeroShotClassification
5312
5313
  });
5313
5314
  var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
@@ -5316,6 +5317,22 @@ var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingfa
5316
5317
  -H 'Content-Type: application/json' \\
5317
5318
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
5318
5319
  `;
5320
+ var snippetTextGeneration = (model, accessToken) => {
5321
+ if (model.config?.tokenizer_config?.chat_template) {
5322
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
5323
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
5324
+ -H 'Content-Type: application/json' \\
5325
+ -d '{
5326
+ "model": "${model.id}",
5327
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
5328
+ "max_tokens": 500,
5329
+ "stream": false
5330
+ }'
5331
+ `;
5332
+ } else {
5333
+ return snippetBasic(model, accessToken);
5334
+ }
5335
+ };
5319
5336
  var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
5320
5337
  -X POST \\
5321
5338
  -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
@@ -5337,7 +5354,7 @@ var curlSnippets = {
5337
5354
  translation: snippetBasic,
5338
5355
  summarization: snippetBasic,
5339
5356
  "feature-extraction": snippetBasic,
5340
- "text-generation": snippetBasic,
5357
+ "text-generation": snippetTextGeneration,
5341
5358
  "text2text-generation": snippetBasic,
5342
5359
  "fill-mask": snippetBasic,
5343
5360
  "sentence-similarity": snippetBasic,
@@ -5366,6 +5383,7 @@ __export(python_exports, {
5366
5383
  hasPythonInferenceSnippet: () => hasPythonInferenceSnippet,
5367
5384
  pythonSnippets: () => pythonSnippets,
5368
5385
  snippetBasic: () => snippetBasic2,
5386
+ snippetConversational: () => snippetConversational,
5369
5387
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
5370
5388
  snippetFile: () => snippetFile2,
5371
5389
  snippetTabular: () => snippetTabular,
@@ -5374,6 +5392,20 @@ __export(python_exports, {
5374
5392
  snippetZeroShotClassification: () => snippetZeroShotClassification2,
5375
5393
  snippetZeroShotImageClassification: () => snippetZeroShotImageClassification
5376
5394
  });
5395
+ var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient
5396
+
5397
+ client = InferenceClient(
5398
+ "${model.id}",
5399
+ token="${accessToken || "{API_TOKEN}"}",
5400
+ )
5401
+
5402
+ for message in client.chat_completion(
5403
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
5404
+ max_tokens=500,
5405
+ stream=True,
5406
+ ):
5407
+ print(message.choices[0].delta.content, end="")
5408
+ `;
5377
5409
  var snippetZeroShotClassification2 = (model) => `def query(payload):
5378
5410
  response = requests.post(API_URL, headers=headers, json=payload)
5379
5411
  return response.json()
@@ -5491,13 +5523,17 @@ var pythonSnippets = {
5491
5523
  "zero-shot-image-classification": snippetZeroShotImageClassification
5492
5524
  };
5493
5525
  function getPythonInferenceSnippet(model, accessToken) {
5494
- const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
5495
- return `import requests
5526
+ if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
5527
+ return snippetConversational(model, accessToken);
5528
+ } else {
5529
+ const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : "";
5530
+ return `import requests
5496
5531
 
5497
5532
  API_URL = "https://api-inference.huggingface.co/models/${model.id}"
5498
5533
  headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
5499
5534
 
5500
5535
  ${body}`;
5536
+ }
5501
5537
  }
5502
5538
  function hasPythonInferenceSnippet(model) {
5503
5539
  return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets;
@@ -5511,6 +5547,7 @@ __export(js_exports, {
5511
5547
  jsSnippets: () => jsSnippets,
5512
5548
  snippetBasic: () => snippetBasic3,
5513
5549
  snippetFile: () => snippetFile3,
5550
+ snippetTextGeneration: () => snippetTextGeneration2,
5514
5551
  snippetTextToAudio: () => snippetTextToAudio2,
5515
5552
  snippetTextToImage: () => snippetTextToImage2,
5516
5553
  snippetZeroShotClassification: () => snippetZeroShotClassification3
@@ -5519,7 +5556,10 @@ var snippetBasic3 = (model, accessToken) => `async function query(data) {
5519
5556
  const response = await fetch(
5520
5557
  "https://api-inference.huggingface.co/models/${model.id}",
5521
5558
  {
5522
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5559
+ headers: {
5560
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5561
+ "Content-Type": "application/json",
5562
+ },
5523
5563
  method: "POST",
5524
5564
  body: JSON.stringify(data),
5525
5565
  }
@@ -5531,11 +5571,32 @@ var snippetBasic3 = (model, accessToken) => `async function query(data) {
5531
5571
  query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
5532
5572
  console.log(JSON.stringify(response));
5533
5573
  });`;
5574
+ var snippetTextGeneration2 = (model, accessToken) => {
5575
+ if (model.config?.tokenizer_config?.chat_template) {
5576
+ return `import { HfInference } from "@huggingface/inference";
5577
+
5578
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
5579
+
5580
+ for await (const chunk of inference.chatCompletionStream({
5581
+ model: "${model.id}",
5582
+ messages: [{ role: "user", content: "What is the capital of France?" }],
5583
+ max_tokens: 500,
5584
+ })) {
5585
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
5586
+ }
5587
+ `;
5588
+ } else {
5589
+ return snippetBasic3(model, accessToken);
5590
+ }
5591
+ };
5534
5592
  var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) {
5535
5593
  const response = await fetch(
5536
5594
  "https://api-inference.huggingface.co/models/${model.id}",
5537
5595
  {
5538
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5596
+ headers: {
5597
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5598
+ "Content-Type": "application/json",
5599
+ },
5539
5600
  method: "POST",
5540
5601
  body: JSON.stringify(data),
5541
5602
  }
@@ -5553,7 +5614,10 @@ var snippetTextToImage2 = (model, accessToken) => `async function query(data) {
5553
5614
  const response = await fetch(
5554
5615
  "https://api-inference.huggingface.co/models/${model.id}",
5555
5616
  {
5556
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5617
+ headers: {
5618
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5619
+ "Content-Type": "application/json",
5620
+ },
5557
5621
  method: "POST",
5558
5622
  body: JSON.stringify(data),
5559
5623
  }
@@ -5569,7 +5633,10 @@ var snippetTextToAudio2 = (model, accessToken) => {
5569
5633
  const response = await fetch(
5570
5634
  "https://api-inference.huggingface.co/models/${model.id}",
5571
5635
  {
5572
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5636
+ headers: {
5637
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5638
+ "Content-Type": "application/json",
5639
+ },
5573
5640
  method: "POST",
5574
5641
  body: JSON.stringify(data),
5575
5642
  }
@@ -5598,7 +5665,10 @@ var snippetFile3 = (model, accessToken) => `async function query(filename) {
5598
5665
  const response = await fetch(
5599
5666
  "https://api-inference.huggingface.co/models/${model.id}",
5600
5667
  {
5601
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
5668
+ headers: {
5669
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
5670
+ "Content-Type": "application/json",
5671
+ },
5602
5672
  method: "POST",
5603
5673
  body: data,
5604
5674
  }
@@ -5620,7 +5690,7 @@ var jsSnippets = {
5620
5690
  translation: snippetBasic3,
5621
5691
  summarization: snippetBasic3,
5622
5692
  "feature-extraction": snippetBasic3,
5623
- "text-generation": snippetBasic3,
5693
+ "text-generation": snippetTextGeneration2,
5624
5694
  "text2text-generation": snippetBasic3,
5625
5695
  "fill-mask": snippetBasic3,
5626
5696
  "sentence-similarity": snippetBasic3,
@@ -6082,7 +6152,7 @@ var LOCAL_APPS = {
6082
6152
  docsUrl: "https://lmstudio.ai",
6083
6153
  mainTask: "text-generation",
6084
6154
  displayOnModelPage: isGgufModel,
6085
- deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}` + filepath ? `&file=${filepath}` : "")
6155
+ deeplink: (model, filepath) => new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`)
6086
6156
  },
6087
6157
  jan: {
6088
6158
  prettyLabel: "Jan",
@@ -1,6 +1,7 @@
1
1
  import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
3
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
4
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
4
5
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
5
6
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string;
6
7
  export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>>;
@@ -1 +1 @@
1
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM3E,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM5F,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAK1E,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
1
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM3E,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBpF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM5F,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAK1E,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
@@ -1,6 +1,7 @@
1
1
  import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
3
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
4
+ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
4
5
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
5
6
  export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => string;
6
7
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => string;
@@ -1 +1 @@
1
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBxE,CAAC;AAEL,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAe9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAiBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
1
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAmBxE,CAAC;AAEL,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkBpF,CAAC;AACF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkB9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAoBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
@@ -1,5 +1,6 @@
1
1
  import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
+ export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string) => string;
3
4
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => string;
4
5
  export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => string;
5
6
  export declare const snippetBasic: (model: ModelDataMinimal) => string;
@@ -8,7 +9,7 @@ export declare const snippetTextToImage: (model: ModelDataMinimal) => string;
8
9
  export declare const snippetTabular: (model: ModelDataMinimal) => string;
9
10
  export declare const snippetTextToAudio: (model: ModelDataMinimal) => string;
10
11
  export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => string;
11
- export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal) => string>>;
12
+ export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>>;
12
13
  export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string;
13
14
  export declare function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean;
14
15
  //# sourceMappingURL=python.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,KAAK,MAAM,CAAC,CA4B7F,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAU9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
1
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAcpF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CA4BlH,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAiB9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
@@ -4,5 +4,5 @@ import type { ModelData } from "../model-data";
4
4
  *
5
5
  * Add more fields as needed.
6
6
  */
7
- export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name">;
7
+ export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
8
8
  //# sourceMappingURL=types.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,CAAC,CAAC"}
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,GAAG,QAAQ,CAAC,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@huggingface/tasks",
3
3
  "packageManager": "pnpm@8.10.5",
4
- "version": "0.10.20",
4
+ "version": "0.10.21",
5
5
  "description": "List of ML tasks for huggingface.co/tasks",
6
6
  "repository": "https://github.com/huggingface/huggingface.js.git",
7
7
  "publishConfig": {
package/src/local-apps.ts CHANGED
@@ -97,7 +97,7 @@ export const LOCAL_APPS = {
97
97
  mainTask: "text-generation",
98
98
  displayOnModelPage: isGgufModel,
99
99
  deeplink: (model, filepath) =>
100
- new URL(`lmstudio://open_from_hf?model=${model.id}` + filepath ? `&file=${filepath}` : ""),
100
+ new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`),
101
101
  },
102
102
  jan: {
103
103
  prettyLabel: "Jan",
@@ -10,6 +10,24 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
10
10
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
11
11
  `;
12
12
 
13
+ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
14
+ if (model.config?.tokenizer_config?.chat_template) {
15
+ // Conversational model detected, so we display a code snippet that features the Messages API
16
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
17
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
18
+ -H 'Content-Type: application/json' \\
19
+ -d '{
20
+ "model": "${model.id}",
21
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
22
+ "max_tokens": 500,
23
+ "stream": false
24
+ }'
25
+ `;
26
+ } else {
27
+ return snippetBasic(model, accessToken);
28
+ }
29
+ };
30
+
13
31
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
14
32
  `curl https://api-inference.huggingface.co/models/${model.id} \\
15
33
  -X POST \\
@@ -35,7 +53,7 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
35
53
  translation: snippetBasic,
36
54
  summarization: snippetBasic,
37
55
  "feature-extraction": snippetBasic,
38
- "text-generation": snippetBasic,
56
+ "text-generation": snippetTextGeneration,
39
57
  "text2text-generation": snippetBasic,
40
58
  "fill-mask": snippetBasic,
41
59
  "sentence-similarity": snippetBasic,
@@ -11,30 +11,30 @@ const inputsSummarization = () =>
11
11
 
12
12
  const inputsTableQuestionAnswering = () =>
13
13
  `{
14
- "query": "How many stars does the transformers repository have?",
15
- "table": {
16
- "Repository": ["Transformers", "Datasets", "Tokenizers"],
17
- "Stars": ["36542", "4512", "3934"],
18
- "Contributors": ["651", "77", "34"],
19
- "Programming language": [
20
- "Python",
21
- "Python",
22
- "Rust, Python and NodeJS"
23
- ]
24
- }
25
- }`;
14
+ "query": "How many stars does the transformers repository have?",
15
+ "table": {
16
+ "Repository": ["Transformers", "Datasets", "Tokenizers"],
17
+ "Stars": ["36542", "4512", "3934"],
18
+ "Contributors": ["651", "77", "34"],
19
+ "Programming language": [
20
+ "Python",
21
+ "Python",
22
+ "Rust, Python and NodeJS"
23
+ ]
24
+ }
25
+ }`;
26
26
 
27
27
  const inputsVisualQuestionAnswering = () =>
28
28
  `{
29
- "image": "cat.png",
30
- "question": "What is in this image?"
31
- }`;
29
+ "image": "cat.png",
30
+ "question": "What is in this image?"
31
+ }`;
32
32
 
33
33
  const inputsQuestionAnswering = () =>
34
34
  `{
35
- "question": "What is my name?",
36
- "context": "My name is Clara and I live in Berkeley."
37
- }`;
35
+ "question": "What is my name?",
36
+ "context": "My name is Clara and I live in Berkeley."
37
+ }`;
38
38
 
39
39
  const inputsTextClassification = () => `"I like you. I love you"`;
40
40
 
@@ -48,13 +48,13 @@ const inputsFillMask = (model: ModelDataMinimal) => `"The answer to the universe
48
48
 
49
49
  const inputsSentenceSimilarity = () =>
50
50
  `{
51
- "source_sentence": "That is a happy person",
52
- "sentences": [
53
- "That is a happy dog",
54
- "That is a very happy person",
55
- "Today is a sunny day"
56
- ]
57
- }`;
51
+ "source_sentence": "That is a happy person",
52
+ "sentences": [
53
+ "That is a happy dog",
54
+ "That is a very happy person",
55
+ "Today is a sunny day"
56
+ ]
57
+ }`;
58
58
 
59
59
  const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
60
60
 
@@ -7,7 +7,10 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
7
7
  const response = await fetch(
8
8
  "https://api-inference.huggingface.co/models/${model.id}",
9
9
  {
10
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
10
+ headers: {
11
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
12
+ "Content-Type": "application/json",
13
+ },
11
14
  method: "POST",
12
15
  body: JSON.stringify(data),
13
16
  }
@@ -20,12 +23,34 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
20
23
  console.log(JSON.stringify(response));
21
24
  });`;
22
25
 
26
+ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
27
+ if (model.config?.tokenizer_config?.chat_template) {
28
+ // Conversational model detected, so we display a code snippet that features the Messages API
29
+ return `import { HfInference } from "@huggingface/inference";
30
+
31
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
32
+
33
+ for await (const chunk of inference.chatCompletionStream({
34
+ model: "${model.id}",
35
+ messages: [{ role: "user", content: "What is the capital of France?" }],
36
+ max_tokens: 500,
37
+ })) {
38
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
39
+ }
40
+ `;
41
+ } else {
42
+ return snippetBasic(model, accessToken);
43
+ }
44
+ };
23
45
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
24
46
  `async function query(data) {
25
47
  const response = await fetch(
26
48
  "https://api-inference.huggingface.co/models/${model.id}",
27
49
  {
28
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
50
+ headers: {
51
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
52
+ "Content-Type": "application/json",
53
+ },
29
54
  method: "POST",
30
55
  body: JSON.stringify(data),
31
56
  }
@@ -45,7 +70,10 @@ export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string)
45
70
  const response = await fetch(
46
71
  "https://api-inference.huggingface.co/models/${model.id}",
47
72
  {
48
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
73
+ headers: {
74
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
75
+ "Content-Type": "application/json",
76
+ },
49
77
  method: "POST",
50
78
  body: JSON.stringify(data),
51
79
  }
@@ -62,7 +90,10 @@ export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string)
62
90
  const response = await fetch(
63
91
  "https://api-inference.huggingface.co/models/${model.id}",
64
92
  {
65
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
93
+ headers: {
94
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
95
+ "Content-Type": "application/json",
96
+ },
66
97
  method: "POST",
67
98
  body: JSON.stringify(data),
68
99
  }
@@ -99,7 +130,10 @@ export const snippetFile = (model: ModelDataMinimal, accessToken: string): strin
99
130
  const response = await fetch(
100
131
  "https://api-inference.huggingface.co/models/${model.id}",
101
132
  {
102
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
133
+ headers: {
134
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
135
+ "Content-Type": "application/json",
136
+ },
103
137
  method: "POST",
104
138
  body: data,
105
139
  }
@@ -122,7 +156,7 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal,
122
156
  translation: snippetBasic,
123
157
  summarization: snippetBasic,
124
158
  "feature-extraction": snippetBasic,
125
- "text-generation": snippetBasic,
159
+ "text-generation": snippetTextGeneration,
126
160
  "text2text-generation": snippetBasic,
127
161
  "fill-mask": snippetBasic,
128
162
  "sentence-similarity": snippetBasic,
@@ -2,6 +2,22 @@ import type { PipelineType } from "../pipelines.js";
2
2
  import { getModelInputSnippet } from "./inputs.js";
3
3
  import type { ModelDataMinimal } from "./types.js";
4
4
 
5
+ export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string =>
6
+ `from huggingface_hub import InferenceClient
7
+
8
+ client = InferenceClient(
9
+ "${model.id}",
10
+ token="${accessToken || "{API_TOKEN}"}",
11
+ )
12
+
13
+ for message in client.chat_completion(
14
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
15
+ max_tokens=500,
16
+ stream=True,
17
+ ):
18
+ print(message.choices[0].delta.content, end="")
19
+ `;
20
+
5
21
  export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
6
22
  `def query(payload):
7
23
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -107,7 +123,7 @@ output = query({
107
123
  "inputs": ${getModelInputSnippet(model)},
108
124
  })`;
109
125
 
110
- export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal) => string>> = {
126
+ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>> = {
111
127
  // Same order as in tasks/src/pipelines.ts
112
128
  "text-classification": snippetBasic,
113
129
  "token-classification": snippetBasic,
@@ -138,15 +154,22 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinim
138
154
  };
139
155
 
140
156
  export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
141
- const body =
142
- model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
157
+ if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
158
+ // Conversational model detected, so we display a code snippet that features the Messages API
159
+ return snippetConversational(model, accessToken);
160
+ } else {
161
+ const body =
162
+ model.pipeline_tag && model.pipeline_tag in pythonSnippets
163
+ ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
164
+ : "";
143
165
 
144
- return `import requests
166
+ return `import requests
145
167
 
146
168
  API_URL = "https://api-inference.huggingface.co/models/${model.id}"
147
169
  headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
148
170
 
149
171
  ${body}`;
172
+ }
150
173
  }
151
174
 
152
175
  export function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean {
@@ -5,4 +5,4 @@ import type { ModelData } from "../model-data";
5
5
  *
6
6
  * Add more fields as needed.
7
7
  */
8
- export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name">;
8
+ export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;