@huggingface/tasks 0.12.10 → 0.12.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/dist/index.cjs +83 -7
  2. package/dist/index.js +83 -7
  3. package/dist/src/snippets/curl.d.ts +1 -0
  4. package/dist/src/snippets/curl.d.ts.map +1 -1
  5. package/dist/src/snippets/js.d.ts +1 -0
  6. package/dist/src/snippets/js.d.ts.map +1 -1
  7. package/dist/src/snippets/python.d.ts +1 -0
  8. package/dist/src/snippets/python.d.ts.map +1 -1
  9. package/dist/src/snippets/types.d.ts +1 -1
  10. package/dist/src/snippets/types.d.ts.map +1 -1
  11. package/dist/src/tasks/automatic-speech-recognition/inference.d.ts +1 -1
  12. package/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map +1 -1
  13. package/dist/src/tasks/image-to-text/inference.d.ts +1 -1
  14. package/dist/src/tasks/image-to-text/inference.d.ts.map +1 -1
  15. package/dist/src/tasks/text-to-audio/inference.d.ts +1 -1
  16. package/dist/src/tasks/text-to-audio/inference.d.ts.map +1 -1
  17. package/dist/src/tasks/text-to-speech/inference.d.ts +1 -1
  18. package/dist/src/tasks/text-to-speech/inference.d.ts.map +1 -1
  19. package/package.json +1 -1
  20. package/src/snippets/curl.ts +28 -1
  21. package/src/snippets/js.ts +31 -1
  22. package/src/snippets/python.ts +29 -5
  23. package/src/snippets/types.ts +4 -1
  24. package/src/tasks/automatic-speech-recognition/inference.ts +1 -1
  25. package/src/tasks/automatic-speech-recognition/spec/input.json +1 -1
  26. package/src/tasks/image-to-text/inference.ts +1 -1
  27. package/src/tasks/image-to-text/spec/input.json +1 -1
  28. package/src/tasks/text-to-audio/inference.ts +1 -1
  29. package/src/tasks/text-to-audio/spec/input.json +1 -1
  30. package/src/tasks/text-to-speech/inference.ts +1 -1
  31. package/src/tasks/text-to-speech/spec/input.json +1 -1
package/dist/index.cjs CHANGED
@@ -6204,6 +6204,7 @@ __export(curl_exports, {
6204
6204
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
6205
6205
  snippetBasic: () => snippetBasic,
6206
6206
  snippetFile: () => snippetFile,
6207
+ snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
6207
6208
  snippetTextGeneration: () => snippetTextGeneration,
6208
6209
  snippetZeroShotClassification: () => snippetZeroShotClassification
6209
6210
  });
@@ -6213,7 +6214,7 @@ var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingfa
6213
6214
  -H 'Content-Type: application/json' \\
6214
6215
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`;
6215
6216
  var snippetTextGeneration = (model, accessToken) => {
6216
- if (model.config?.tokenizer_config?.chat_template) {
6217
+ if (model.tags.includes("conversational")) {
6217
6218
  return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
6218
6219
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
6219
6220
  -H 'Content-Type: application/json' \\
@@ -6228,6 +6229,30 @@ var snippetTextGeneration = (model, accessToken) => {
6228
6229
  return snippetBasic(model, accessToken);
6229
6230
  }
6230
6231
  };
6232
+ var snippetImageTextToTextGeneration = (model, accessToken) => {
6233
+ if (model.tags.includes("conversational")) {
6234
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
6235
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
6236
+ -H 'Content-Type: application/json' \\
6237
+ -d '{
6238
+ "model": "${model.id}",
6239
+ "messages": [
6240
+ {
6241
+ "role": "user",
6242
+ "content": [
6243
+ {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
6244
+ {"type": "text", "text": "Describe this image in one sentence."}
6245
+ ]
6246
+ }
6247
+ ],
6248
+ "max_tokens": 500,
6249
+ "stream": false
6250
+ }'
6251
+ `;
6252
+ } else {
6253
+ return snippetBasic(model, accessToken);
6254
+ }
6255
+ };
6231
6256
  var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
6232
6257
  -X POST \\
6233
6258
  -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
@@ -6248,6 +6273,7 @@ var curlSnippets = {
6248
6273
  summarization: snippetBasic,
6249
6274
  "feature-extraction": snippetBasic,
6250
6275
  "text-generation": snippetTextGeneration,
6276
+ "image-text-to-text": snippetImageTextToTextGeneration,
6251
6277
  "text2text-generation": snippetBasic,
6252
6278
  "fill-mask": snippetBasic,
6253
6279
  "sentence-similarity": snippetBasic,
@@ -6277,6 +6303,7 @@ __export(python_exports, {
6277
6303
  pythonSnippets: () => pythonSnippets,
6278
6304
  snippetBasic: () => snippetBasic2,
6279
6305
  snippetConversational: () => snippetConversational,
6306
+ snippetConversationalWithImage: () => snippetConversationalWithImage,
6280
6307
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
6281
6308
  snippetFile: () => snippetFile2,
6282
6309
  snippetTabular: () => snippetTabular,
@@ -6287,17 +6314,36 @@ __export(python_exports, {
6287
6314
  });
6288
6315
  var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient
6289
6316
 
6290
- client = InferenceClient(
6291
- "${model.id}",
6292
- token="${accessToken || "{API_TOKEN}"}",
6293
- )
6317
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
6294
6318
 
6295
6319
  for message in client.chat_completion(
6320
+ model="${model.id}",
6296
6321
  messages=[{"role": "user", "content": "What is the capital of France?"}],
6297
6322
  max_tokens=500,
6298
6323
  stream=True,
6299
6324
  ):
6300
6325
  print(message.choices[0].delta.content, end="")`;
6326
+ var snippetConversationalWithImage = (model, accessToken) => `from huggingface_hub import InferenceClient
6327
+
6328
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
6329
+
6330
+ image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
6331
+
6332
+ for message in client.chat_completion(
6333
+ model="${model.id}",
6334
+ messages=[
6335
+ {
6336
+ "role": "user",
6337
+ "content": [
6338
+ {"type": "image_url", "image_url": {"url": image_url}},
6339
+ {"type": "text", "text": "Describe this image in one sentence."},
6340
+ ],
6341
+ }
6342
+ ],
6343
+ max_tokens=500,
6344
+ stream=True,
6345
+ ):
6346
+ print(message.choices[0].delta.content, end="")`;
6301
6347
  var snippetZeroShotClassification2 = (model) => `def query(payload):
6302
6348
  response = requests.post(API_URL, headers=headers, json=payload)
6303
6349
  return response.json()
@@ -6415,8 +6461,10 @@ var pythonSnippets = {
6415
6461
  "zero-shot-image-classification": snippetZeroShotImageClassification
6416
6462
  };
6417
6463
  function getPythonInferenceSnippet(model, accessToken) {
6418
- if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
6464
+ if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
6419
6465
  return snippetConversational(model, accessToken);
6466
+ } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
6467
+ return snippetConversationalWithImage(model, accessToken);
6420
6468
  } else {
6421
6469
  const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : "";
6422
6470
  return `import requests
@@ -6439,6 +6487,7 @@ __export(js_exports, {
6439
6487
  jsSnippets: () => jsSnippets,
6440
6488
  snippetBasic: () => snippetBasic3,
6441
6489
  snippetFile: () => snippetFile3,
6490
+ snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
6442
6491
  snippetTextGeneration: () => snippetTextGeneration2,
6443
6492
  snippetTextToAudio: () => snippetTextToAudio2,
6444
6493
  snippetTextToImage: () => snippetTextToImage2,
@@ -6464,7 +6513,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
6464
6513
  console.log(JSON.stringify(response));
6465
6514
  });`;
6466
6515
  var snippetTextGeneration2 = (model, accessToken) => {
6467
- if (model.config?.tokenizer_config?.chat_template) {
6516
+ if (model.tags.includes("conversational")) {
6468
6517
  return `import { HfInference } from "@huggingface/inference";
6469
6518
 
6470
6519
  const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
@@ -6480,6 +6529,32 @@ for await (const chunk of inference.chatCompletionStream({
6480
6529
  return snippetBasic3(model, accessToken);
6481
6530
  }
6482
6531
  };
6532
+ var snippetImageTextToTextGeneration2 = (model, accessToken) => {
6533
+ if (model.tags.includes("conversational")) {
6534
+ return `import { HfInference } from "@huggingface/inference";
6535
+
6536
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
6537
+ const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
6538
+
6539
+ for await (const chunk of inference.chatCompletionStream({
6540
+ model: "${model.id}",
6541
+ messages: [
6542
+ {
6543
+ "role": "user",
6544
+ "content": [
6545
+ {"type": "image_url", "image_url": {"url": imageUrl}},
6546
+ {"type": "text", "text": "Describe this image in one sentence."},
6547
+ ],
6548
+ }
6549
+ ],
6550
+ max_tokens: 500,
6551
+ })) {
6552
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
6553
+ }`;
6554
+ } else {
6555
+ return snippetBasic3(model, accessToken);
6556
+ }
6557
+ };
6483
6558
  var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) {
6484
6559
  const response = await fetch(
6485
6560
  "https://api-inference.huggingface.co/models/${model.id}",
@@ -6582,6 +6657,7 @@ var jsSnippets = {
6582
6657
  summarization: snippetBasic3,
6583
6658
  "feature-extraction": snippetBasic3,
6584
6659
  "text-generation": snippetTextGeneration2,
6660
+ "image-text-to-text": snippetImageTextToTextGeneration2,
6585
6661
  "text2text-generation": snippetBasic3,
6586
6662
  "fill-mask": snippetBasic3,
6587
6663
  "sentence-similarity": snippetBasic3,
package/dist/index.js CHANGED
@@ -6166,6 +6166,7 @@ __export(curl_exports, {
6166
6166
  hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
6167
6167
  snippetBasic: () => snippetBasic,
6168
6168
  snippetFile: () => snippetFile,
6169
+ snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
6169
6170
  snippetTextGeneration: () => snippetTextGeneration,
6170
6171
  snippetZeroShotClassification: () => snippetZeroShotClassification
6171
6172
  });
@@ -6175,7 +6176,7 @@ var snippetBasic = (model, accessToken) => `curl https://api-inference.huggingfa
6175
6176
  -H 'Content-Type: application/json' \\
6176
6177
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`;
6177
6178
  var snippetTextGeneration = (model, accessToken) => {
6178
- if (model.config?.tokenizer_config?.chat_template) {
6179
+ if (model.tags.includes("conversational")) {
6179
6180
  return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
6180
6181
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
6181
6182
  -H 'Content-Type: application/json' \\
@@ -6190,6 +6191,30 @@ var snippetTextGeneration = (model, accessToken) => {
6190
6191
  return snippetBasic(model, accessToken);
6191
6192
  }
6192
6193
  };
6194
+ var snippetImageTextToTextGeneration = (model, accessToken) => {
6195
+ if (model.tags.includes("conversational")) {
6196
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
6197
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
6198
+ -H 'Content-Type: application/json' \\
6199
+ -d '{
6200
+ "model": "${model.id}",
6201
+ "messages": [
6202
+ {
6203
+ "role": "user",
6204
+ "content": [
6205
+ {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
6206
+ {"type": "text", "text": "Describe this image in one sentence."}
6207
+ ]
6208
+ }
6209
+ ],
6210
+ "max_tokens": 500,
6211
+ "stream": false
6212
+ }'
6213
+ `;
6214
+ } else {
6215
+ return snippetBasic(model, accessToken);
6216
+ }
6217
+ };
6193
6218
  var snippetZeroShotClassification = (model, accessToken) => `curl https://api-inference.huggingface.co/models/${model.id} \\
6194
6219
  -X POST \\
6195
6220
  -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
@@ -6210,6 +6235,7 @@ var curlSnippets = {
6210
6235
  summarization: snippetBasic,
6211
6236
  "feature-extraction": snippetBasic,
6212
6237
  "text-generation": snippetTextGeneration,
6238
+ "image-text-to-text": snippetImageTextToTextGeneration,
6213
6239
  "text2text-generation": snippetBasic,
6214
6240
  "fill-mask": snippetBasic,
6215
6241
  "sentence-similarity": snippetBasic,
@@ -6239,6 +6265,7 @@ __export(python_exports, {
6239
6265
  pythonSnippets: () => pythonSnippets,
6240
6266
  snippetBasic: () => snippetBasic2,
6241
6267
  snippetConversational: () => snippetConversational,
6268
+ snippetConversationalWithImage: () => snippetConversationalWithImage,
6242
6269
  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
6243
6270
  snippetFile: () => snippetFile2,
6244
6271
  snippetTabular: () => snippetTabular,
@@ -6249,17 +6276,36 @@ __export(python_exports, {
6249
6276
  });
6250
6277
  var snippetConversational = (model, accessToken) => `from huggingface_hub import InferenceClient
6251
6278
 
6252
- client = InferenceClient(
6253
- "${model.id}",
6254
- token="${accessToken || "{API_TOKEN}"}",
6255
- )
6279
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
6256
6280
 
6257
6281
  for message in client.chat_completion(
6282
+ model="${model.id}",
6258
6283
  messages=[{"role": "user", "content": "What is the capital of France?"}],
6259
6284
  max_tokens=500,
6260
6285
  stream=True,
6261
6286
  ):
6262
6287
  print(message.choices[0].delta.content, end="")`;
6288
+ var snippetConversationalWithImage = (model, accessToken) => `from huggingface_hub import InferenceClient
6289
+
6290
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
6291
+
6292
+ image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
6293
+
6294
+ for message in client.chat_completion(
6295
+ model="${model.id}",
6296
+ messages=[
6297
+ {
6298
+ "role": "user",
6299
+ "content": [
6300
+ {"type": "image_url", "image_url": {"url": image_url}},
6301
+ {"type": "text", "text": "Describe this image in one sentence."},
6302
+ ],
6303
+ }
6304
+ ],
6305
+ max_tokens=500,
6306
+ stream=True,
6307
+ ):
6308
+ print(message.choices[0].delta.content, end="")`;
6263
6309
  var snippetZeroShotClassification2 = (model) => `def query(payload):
6264
6310
  response = requests.post(API_URL, headers=headers, json=payload)
6265
6311
  return response.json()
@@ -6377,8 +6423,10 @@ var pythonSnippets = {
6377
6423
  "zero-shot-image-classification": snippetZeroShotImageClassification
6378
6424
  };
6379
6425
  function getPythonInferenceSnippet(model, accessToken) {
6380
- if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
6426
+ if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
6381
6427
  return snippetConversational(model, accessToken);
6428
+ } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
6429
+ return snippetConversationalWithImage(model, accessToken);
6382
6430
  } else {
6383
6431
  const body = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? "" : "";
6384
6432
  return `import requests
@@ -6401,6 +6449,7 @@ __export(js_exports, {
6401
6449
  jsSnippets: () => jsSnippets,
6402
6450
  snippetBasic: () => snippetBasic3,
6403
6451
  snippetFile: () => snippetFile3,
6452
+ snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
6404
6453
  snippetTextGeneration: () => snippetTextGeneration2,
6405
6454
  snippetTextToAudio: () => snippetTextToAudio2,
6406
6455
  snippetTextToImage: () => snippetTextToImage2,
@@ -6426,7 +6475,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
6426
6475
  console.log(JSON.stringify(response));
6427
6476
  });`;
6428
6477
  var snippetTextGeneration2 = (model, accessToken) => {
6429
- if (model.config?.tokenizer_config?.chat_template) {
6478
+ if (model.tags.includes("conversational")) {
6430
6479
  return `import { HfInference } from "@huggingface/inference";
6431
6480
 
6432
6481
  const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
@@ -6442,6 +6491,32 @@ for await (const chunk of inference.chatCompletionStream({
6442
6491
  return snippetBasic3(model, accessToken);
6443
6492
  }
6444
6493
  };
6494
+ var snippetImageTextToTextGeneration2 = (model, accessToken) => {
6495
+ if (model.tags.includes("conversational")) {
6496
+ return `import { HfInference } from "@huggingface/inference";
6497
+
6498
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
6499
+ const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
6500
+
6501
+ for await (const chunk of inference.chatCompletionStream({
6502
+ model: "${model.id}",
6503
+ messages: [
6504
+ {
6505
+ "role": "user",
6506
+ "content": [
6507
+ {"type": "image_url", "image_url": {"url": imageUrl}},
6508
+ {"type": "text", "text": "Describe this image in one sentence."},
6509
+ ],
6510
+ }
6511
+ ],
6512
+ max_tokens: 500,
6513
+ })) {
6514
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
6515
+ }`;
6516
+ } else {
6517
+ return snippetBasic3(model, accessToken);
6518
+ }
6519
+ };
6445
6520
  var snippetZeroShotClassification3 = (model, accessToken) => `async function query(data) {
6446
6521
  const response = await fetch(
6447
6522
  "https://api-inference.huggingface.co/models/${model.id}",
@@ -6544,6 +6619,7 @@ var jsSnippets = {
6544
6619
  summarization: snippetBasic3,
6545
6620
  "feature-extraction": snippetBasic3,
6546
6621
  "text-generation": snippetTextGeneration2,
6622
+ "image-text-to-text": snippetImageTextToTextGeneration2,
6547
6623
  "text2text-generation": snippetBasic3,
6548
6624
  "fill-mask": snippetBasic3,
6549
6625
  "sentence-similarity": snippetBasic3,
@@ -2,6 +2,7 @@ import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
3
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
4
4
  export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
5
+ export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
5
6
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
6
7
  export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string;
7
8
  export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>>;
@@ -1 +1 @@
1
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAKhB,CAAC;AAE7D,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBpF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAKjC,CAAC;AAE7D,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAIf,CAAC;AAE7D,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
1
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAKhB,CAAC;AAE7D,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBpF,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAwB/F,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAKjC,CAAC;AAE7D,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAIf,CAAC;AAE7D,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAyBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
@@ -2,6 +2,7 @@ import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
3
  export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
4
4
  export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
5
+ export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => string;
5
6
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
6
7
  export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => string;
7
8
  export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => string;
@@ -1 +1 @@
1
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAmBxE,CAAC;AAEL,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAiBpF,CAAC;AACF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkB9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAoBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
1
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAmBxE,CAAC;AAEL,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAiBpF,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,eAAe,MAAM,KAAG,MA0B/F,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkB9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAoBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAyB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
@@ -1,6 +1,7 @@
1
1
  import type { PipelineType } from "../pipelines.js";
2
2
  import type { ModelDataMinimal } from "./types.js";
3
3
  export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string) => string;
4
+ export declare const snippetConversationalWithImage: (model: ModelDataMinimal, accessToken: string) => string;
4
5
  export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => string;
5
6
  export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => string;
6
7
  export declare const snippetBasic: (model: ModelDataMinimal) => string;
@@ -1 +1 @@
1
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAajC,CAAC;AAErD,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CA4BlH,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAiB9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
1
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,qBAAqB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAWjC,CAAC;AAErD,eAAO,MAAM,8BAA8B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAqB7C,CAAC;AAElD,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CA4BlH,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAoB9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
@@ -4,5 +4,5 @@ import type { ModelData } from "../model-data";
4
4
  *
5
5
  * Add more fields as needed.
6
6
  */
7
- export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
7
+ export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags">;
8
8
  //# sourceMappingURL=types.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,GAAG,QAAQ,CAAC,CAAC"}
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAClC,SAAS,EACT,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,GAAG,QAAQ,GAAG,MAAM,CACzE,CAAC"}
@@ -27,7 +27,7 @@ export interface AutomaticSpeechRecognitionParameters {
27
27
  /**
28
28
  * Parametrization of the text generation process
29
29
  */
30
- generate?: GenerationParameters;
30
+ generation_parameters?: GenerationParameters;
31
31
  /**
32
32
  * Whether to output corresponding timestamps with the generated text
33
33
  */
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/automatic-speech-recognition/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;;OAGG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,oCAAoC,CAAC;IAClD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oCAAoC;IACpD;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC;;OAEG;IACH,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;;OAGG;IACH,MAAM,CAAC,EAAE,qCAAqC,EAAE,CAAC;IACjD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,qCAAqC;IACrD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,UAAU,EAAE,MAAM,EAAE,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/automatic-speech-recognition/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,+BAA+B;IAC/C;;;OAGG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,oCAAoC,CAAC;IAClD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oCAAoC;IACpD;;OAEG;IACH,qBAAqB,CAAC,EAAE,oBAAoB,CAAC;IAC7C;;OAEG;IACH,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;;OAGG;IACH,MAAM,CAAC,EAAE,qCAAqC,EAAE,CAAC;IACjD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,qCAAqC;IACrD;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,UAAU,EAAE,MAAM,EAAE,CAAC;IACrB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -26,7 +26,7 @@ export interface ImageToTextParameters {
26
26
  /**
27
27
  * Parametrization of the text generation process
28
28
  */
29
- generate?: GenerationParameters;
29
+ generation_parameters?: GenerationParameters;
30
30
  /**
31
31
  * The amount of maximum tokens to generate.
32
32
  */
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-text/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC,aAAa,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-text/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,qBAAqB,CAAC,EAAE,oBAAoB,CAAC;IAC7C;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC,aAAa,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -26,7 +26,7 @@ export interface TextToAudioParameters {
26
26
  /**
27
27
  * Parametrization of the text generation process
28
28
  */
29
- generate?: GenerationParameters;
29
+ generation_parameters?: GenerationParameters;
30
30
  [property: string]: unknown;
31
31
  }
32
32
  /**
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-audio/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-audio/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,qBAAqB,CAAC;IACnC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,qBAAqB;IACrC;;OAEG;IACH,qBAAqB,CAAC,EAAE,oBAAoB,CAAC;IAC7C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -26,7 +26,7 @@ export interface TextToSpeechParameters {
26
26
  /**
27
27
  * Parametrization of the text generation process
28
28
  */
29
- generate?: GenerationParameters;
29
+ generation_parameters?: GenerationParameters;
30
30
  [property: string]: unknown;
31
31
  }
32
32
  /**
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,sBAAsB,CAAC;IACpC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,QAAQ,CAAC,EAAE,oBAAoB,CAAC;IAChC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;;;GAIG;AACH,MAAM,WAAW,kBAAkB;IAClC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH;;GAEG;AACH,MAAM,WAAW,iBAAiB;IACjC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,CAAC,EAAE,sBAAsB,CAAC;IACpC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,qBAAqB,CAAC,EAAE,oBAAoB,CAAC;IAC7C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;;;GAIG;AACH,MAAM,WAAW,oBAAoB;IACpC;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;;;OAKG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;;;;;;OAQG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;OAGG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;;;;OAMG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED;;GAEG;AACH,MAAM,MAAM,kBAAkB,GAAG,OAAO,GAAG,OAAO,CAAC;AAEnD;;;;GAIG;AACH,MAAM,WAAW,kBAAkB;IAClC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,YAAY,EAAE,OAAO,CAAC;IACtB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@huggingface/tasks",
3
3
  "packageManager": "pnpm@8.10.5",
4
- "version": "0.12.10",
4
+ "version": "0.12.11",
5
5
  "description": "List of ML tasks for huggingface.co/tasks",
6
6
  "repository": "https://github.com/huggingface/huggingface.js.git",
7
7
  "publishConfig": {
@@ -10,7 +10,7 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
10
10
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`;
11
11
 
12
12
  export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
13
- if (model.config?.tokenizer_config?.chat_template) {
13
+ if (model.tags.includes("conversational")) {
14
14
  // Conversational model detected, so we display a code snippet that features the Messages API
15
15
  return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
16
16
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
@@ -27,6 +27,32 @@ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: stri
27
27
  }
28
28
  };
29
29
 
30
+ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
31
+ if (model.tags.includes("conversational")) {
32
+ // Conversational model detected, so we display a code snippet that features the Messages API
33
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
34
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
35
+ -H 'Content-Type: application/json' \\
36
+ -d '{
37
+ "model": "${model.id}",
38
+ "messages": [
39
+ {
40
+ "role": "user",
41
+ "content": [
42
+ {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
43
+ {"type": "text", "text": "Describe this image in one sentence."}
44
+ ]
45
+ }
46
+ ],
47
+ "max_tokens": 500,
48
+ "stream": false
49
+ }'
50
+ `;
51
+ } else {
52
+ return snippetBasic(model, accessToken);
53
+ }
54
+ };
55
+
30
56
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
31
57
  `curl https://api-inference.huggingface.co/models/${model.id} \\
32
58
  -X POST \\
@@ -51,6 +77,7 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
51
77
  summarization: snippetBasic,
52
78
  "feature-extraction": snippetBasic,
53
79
  "text-generation": snippetTextGeneration,
80
+ "image-text-to-text": snippetImageTextToTextGeneration,
54
81
  "text2text-generation": snippetBasic,
55
82
  "fill-mask": snippetBasic,
56
83
  "sentence-similarity": snippetBasic,
@@ -24,7 +24,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
24
24
  });`;
25
25
 
26
26
  export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
27
- if (model.config?.tokenizer_config?.chat_template) {
27
+ if (model.tags.includes("conversational")) {
28
28
  // Conversational model detected, so we display a code snippet that features the Messages API
29
29
  return `import { HfInference } from "@huggingface/inference";
30
30
 
@@ -41,6 +41,35 @@ for await (const chunk of inference.chatCompletionStream({
41
41
  return snippetBasic(model, accessToken);
42
42
  }
43
43
  };
44
+
45
+ export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
46
+ if (model.tags.includes("conversational")) {
47
+ // Conversational model detected, so we display a code snippet that features the Messages API
48
+ return `import { HfInference } from "@huggingface/inference";
49
+
50
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
51
+ const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
52
+
53
+ for await (const chunk of inference.chatCompletionStream({
54
+ model: "${model.id}",
55
+ messages: [
56
+ {
57
+ "role": "user",
58
+ "content": [
59
+ {"type": "image_url", "image_url": {"url": imageUrl}},
60
+ {"type": "text", "text": "Describe this image in one sentence."},
61
+ ],
62
+ }
63
+ ],
64
+ max_tokens: 500,
65
+ })) {
66
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
67
+ }`;
68
+ } else {
69
+ return snippetBasic(model, accessToken);
70
+ }
71
+ };
72
+
44
73
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
45
74
  `async function query(data) {
46
75
  const response = await fetch(
@@ -156,6 +185,7 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal,
156
185
  summarization: snippetBasic,
157
186
  "feature-extraction": snippetBasic,
158
187
  "text-generation": snippetTextGeneration,
188
+ "image-text-to-text": snippetImageTextToTextGeneration,
159
189
  "text2text-generation": snippetBasic,
160
190
  "fill-mask": snippetBasic,
161
191
  "sentence-similarity": snippetBasic,
@@ -5,18 +5,39 @@ import type { ModelDataMinimal } from "./types.js";
5
5
  export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string =>
6
6
  `from huggingface_hub import InferenceClient
7
7
 
8
- client = InferenceClient(
9
- "${model.id}",
10
- token="${accessToken || "{API_TOKEN}"}",
11
- )
8
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
12
9
 
13
10
  for message in client.chat_completion(
11
+ model="${model.id}",
14
12
  messages=[{"role": "user", "content": "What is the capital of France?"}],
15
13
  max_tokens=500,
16
14
  stream=True,
17
15
  ):
18
16
  print(message.choices[0].delta.content, end="")`;
19
17
 
18
+ export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): string =>
19
+ `from huggingface_hub import InferenceClient
20
+
21
+ client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
22
+
23
+ image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
24
+
25
+ for message in client.chat_completion(
26
+ model="${model.id}",
27
+ messages=[
28
+ {
29
+ "role": "user",
30
+ "content": [
31
+ {"type": "image_url", "image_url": {"url": image_url}},
32
+ {"type": "text", "text": "Describe this image in one sentence."},
33
+ ],
34
+ }
35
+ ],
36
+ max_tokens=500,
37
+ stream=True,
38
+ ):
39
+ print(message.choices[0].delta.content, end="")`;
40
+
20
41
  export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
21
42
  `def query(payload):
22
43
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -153,9 +174,12 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinim
153
174
  };
154
175
 
155
176
  export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
156
- if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
177
+ if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
157
178
  // Conversational model detected, so we display a code snippet that features the Messages API
158
179
  return snippetConversational(model, accessToken);
180
+ } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
181
+ // Example sending an image to the Message API
182
+ return snippetConversationalWithImage(model, accessToken);
159
183
  } else {
160
184
  const body =
161
185
  model.pipeline_tag && model.pipeline_tag in pythonSnippets
@@ -5,4 +5,7 @@ import type { ModelData } from "../model-data";
5
5
  *
6
6
  * Add more fields as needed.
7
7
  */
8
- export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
8
+ export type ModelDataMinimal = Pick<
9
+ ModelData,
10
+ "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags"
11
+ >;
@@ -29,7 +29,7 @@ export interface AutomaticSpeechRecognitionParameters {
29
29
  /**
30
30
  * Parametrization of the text generation process
31
31
  */
32
- generate?: GenerationParameters;
32
+ generation_parameters?: GenerationParameters;
33
33
  /**
34
34
  * Whether to output corresponding timestamps with the generated text
35
35
  */
@@ -24,7 +24,7 @@
24
24
  "type": "boolean",
25
25
  "description": "Whether to output corresponding timestamps with the generated text"
26
26
  },
27
- "generate": {
27
+ "generation_parameters": {
28
28
  "description": "Parametrization of the text generation process",
29
29
  "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
30
30
  }
@@ -28,7 +28,7 @@ export interface ImageToTextParameters {
28
28
  /**
29
29
  * Parametrization of the text generation process
30
30
  */
31
- generate?: GenerationParameters;
31
+ generation_parameters?: GenerationParameters;
32
32
  /**
33
33
  * The amount of maximum tokens to generate.
34
34
  */
@@ -23,7 +23,7 @@
23
23
  "type": "integer",
24
24
  "description": "The amount of maximum tokens to generate."
25
25
  },
26
- "generate": {
26
+ "generation_parameters": {
27
27
  "description": "Parametrization of the text generation process",
28
28
  "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
29
29
  }
@@ -28,7 +28,7 @@ export interface TextToAudioParameters {
28
28
  /**
29
29
  * Parametrization of the text generation process
30
30
  */
31
- generate?: GenerationParameters;
31
+ generation_parameters?: GenerationParameters;
32
32
  [property: string]: unknown;
33
33
  }
34
34
 
@@ -20,7 +20,7 @@
20
20
  "description": "Additional inference parameters for Text To Audio",
21
21
  "type": "object",
22
22
  "properties": {
23
- "generate": {
23
+ "generation_parameters": {
24
24
  "description": "Parametrization of the text generation process",
25
25
  "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
26
26
  }
@@ -28,7 +28,7 @@ export interface TextToSpeechParameters {
28
28
  /**
29
29
  * Parametrization of the text generation process
30
30
  */
31
- generate?: GenerationParameters;
31
+ generation_parameters?: GenerationParameters;
32
32
  [property: string]: unknown;
33
33
  }
34
34
 
@@ -20,7 +20,7 @@
20
20
  "description": "Additional inference parameters for Text To Speech",
21
21
  "type": "object",
22
22
  "properties": {
23
- "generate": {
23
+ "generation_parameters": {
24
24
  "description": "Parametrization of the text generation process",
25
25
  "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
26
26
  }