@huggingface/tasks 0.12.26 → 0.12.27
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/index.cjs +46 -92
- package/dist/index.js +46 -92
- package/dist/src/snippets/curl.d.ts +0 -1
- package/dist/src/snippets/curl.d.ts.map +1 -1
- package/dist/src/snippets/js.d.ts +0 -1
- package/dist/src/snippets/js.d.ts.map +1 -1
- package/dist/src/snippets/python.d.ts +0 -1
- package/dist/src/snippets/python.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/snippets/curl.ts +19 -32
- package/src/snippets/js.ts +19 -34
- package/src/snippets/python.ts +20 -32
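The change in this release consolidates the "image-text-to-text" snippets: the dedicated snippetImageTextToTextGeneration (curl/JS) and snippetConversationalWithImage (Python) helpers are removed, and the existing conversational builders instead pick their default example messages from the model's pipeline_tag. Below is a minimal TypeScript sketch of that selection logic, paraphrased from the diff that follows; it is not the package's exact code, and SketchMessage stands in for the package's ChatCompletionInputMessage type.

type SketchMessage = { role: string; content: string | Array<Record<string, unknown>> };

// Default example messages now depend on pipeline_tag: a plain text question for
// "text-generation", an image_url + text pair for everything else (e.g. "image-text-to-text").
function defaultExampleMessages(pipelineTag?: string): SketchMessage[] {
	if (pipelineTag === "text-generation") {
		return [{ role: "user", content: "What is the capital of France?" }];
	}
	return [
		{
			role: "user",
			content: [
				{
					type: "image_url",
					image_url: {
						url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
					},
				},
				{ type: "text", text: "Describe this image in one sentence." },
			],
		},
	];
}

// Each builder falls back to these examples only when the caller supplies no messages,
// i.e. const messages = opts?.messages ?? exampleMessages;

The "image-text-to-text" entries in curlSnippets, jsSnippets and pythonSnippets are remapped accordingly (to snippetTextGeneration for curl/JS and snippetConversational for Python), and the corresponding type declarations and source maps under dist/ are regenerated.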
package/dist/index.cjs
CHANGED
@@ -6329,7 +6329,6 @@ __export(curl_exports, {
   hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
   snippetBasic: () => snippetBasic,
   snippetFile: () => snippetFile,
-  snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
   snippetTextGeneration: () => snippetTextGeneration,
   snippetZeroShotClassification: () => snippetZeroShotClassification
 });
@@ -6377,9 +6376,21 @@ var snippetBasic = (model, accessToken) => ({
 var snippetTextGeneration = (model, accessToken, opts) => {
   if (model.tags.includes("conversational")) {
     const streaming = opts?.streaming ?? true;
-    const
-    {
+    const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+      {
+        role: "user",
+        content: [
+          {
+            type: "image_url",
+            image_url: {
+              url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+            }
+          },
+          { type: "text", text: "Describe this image in one sentence." }
+        ]
+      }
     ];
+    const messages = opts?.messages ?? exampleMessages;
     const config = {
       ...opts?.temperature ? { temperature: opts.temperature } : void 0,
       max_tokens: opts?.max_tokens ?? 500,
@@ -6414,32 +6425,6 @@ var snippetTextGeneration = (model, accessToken, opts) => {
     return snippetBasic(model, accessToken);
   }
 };
-var snippetImageTextToTextGeneration = (model, accessToken) => {
-  if (model.tags.includes("conversational")) {
-    return {
-      content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
--H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
--H 'Content-Type: application/json' \\
--d '{
-    "model": "${model.id}",
-    "messages": [
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
-                {"type": "text", "text": "Describe this image in one sentence."}
-            ]
-        }
-    ],
-    "max_tokens": 500,
-    "stream": false
-}'
-`
-    };
-  } else {
-    return snippetBasic(model, accessToken);
-  }
-};
 var snippetZeroShotClassification = (model, accessToken) => ({
   content: `curl https://api-inference.huggingface.co/models/${model.id} \\
 -X POST \\
@@ -6464,7 +6449,7 @@ var curlSnippets = {
   summarization: snippetBasic,
   "feature-extraction": snippetBasic,
   "text-generation": snippetTextGeneration,
-  "image-text-to-text":
+  "image-text-to-text": snippetTextGeneration,
   "text2text-generation": snippetBasic,
   "fill-mask": snippetBasic,
   "sentence-similarity": snippetBasic,
@@ -6494,7 +6479,6 @@ __export(python_exports, {
   pythonSnippets: () => pythonSnippets,
   snippetBasic: () => snippetBasic2,
   snippetConversational: () => snippetConversational,
-  snippetConversationalWithImage: () => snippetConversationalWithImage,
   snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
   snippetFile: () => snippetFile2,
   snippetTabular: () => snippetTabular,
@@ -6505,9 +6489,21 @@ __export(python_exports, {
 });
 var snippetConversational = (model, accessToken, opts) => {
   const streaming = opts?.streaming ?? true;
-  const
-  {
+  const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+    {
+      role: "user",
+      content: [
+        {
+          type: "image_url",
+          image_url: {
+            url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+          }
+        },
+        { type: "text", text: "Describe this image in one sentence." }
+      ]
+    }
   ];
+  const messages = opts?.messages ?? exampleMessages;
   const messagesStr = stringifyMessages(messages, {
     sep: ",\n ",
     start: `[
@@ -6609,29 +6605,6 @@ print(completion.choices[0].message)`
     ];
   }
 };
-var snippetConversationalWithImage = (model, accessToken) => ({
-  content: `from huggingface_hub import InferenceClient
-
-client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
-image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
-for message in client.chat_completion(
-	model="${model.id}",
-	messages=[
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": image_url}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens=500,
-	stream=True,
-):
-	print(message.choices[0].delta.content, end="")`
-});
 var snippetZeroShotClassification2 = (model) => ({
   content: `def query(payload):
 	response = requests.post(API_URL, headers=headers, json=payload)
@@ -6749,7 +6722,7 @@ var pythonSnippets = {
   "feature-extraction": snippetBasic2,
   "text-generation": snippetBasic2,
   "text2text-generation": snippetBasic2,
-  "image-text-to-text":
+  "image-text-to-text": snippetConversational,
   "fill-mask": snippetBasic2,
   "sentence-similarity": snippetBasic2,
   "automatic-speech-recognition": snippetFile2,
@@ -6768,10 +6741,8 @@ var pythonSnippets = {
   "zero-shot-image-classification": snippetZeroShotImageClassification
 };
 function getPythonInferenceSnippet(model, accessToken, opts) {
-  if (model.
+  if (model.tags.includes("conversational")) {
     return snippetConversational(model, accessToken, opts);
-  } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
-    return snippetConversationalWithImage(model, accessToken);
   } else {
     let snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
     snippets = Array.isArray(snippets) ? snippets : [snippets];
@@ -6800,7 +6771,6 @@ __export(js_exports, {
   jsSnippets: () => jsSnippets,
   snippetBasic: () => snippetBasic3,
   snippetFile: () => snippetFile3,
-  snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
   snippetTextGeneration: () => snippetTextGeneration2,
   snippetTextToAudio: () => snippetTextToAudio2,
   snippetTextToImage: () => snippetTextToImage2,
@@ -6830,9 +6800,21 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
 var snippetTextGeneration2 = (model, accessToken, opts) => {
   if (model.tags.includes("conversational")) {
     const streaming = opts?.streaming ?? true;
-    const
-    {
+    const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+      {
+        role: "user",
+        content: [
+          {
+            type: "image_url",
+            image_url: {
+              url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+            }
+          },
+          { type: "text", text: "Describe this image in one sentence." }
+        ]
+      }
     ];
+    const messages = opts?.messages ?? exampleMessages;
     const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
     const config = {
       ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6935,34 +6917,6 @@ console.log(chatCompletion.choices[0].message);`
     return snippetBasic3(model, accessToken);
   }
 };
-var snippetImageTextToTextGeneration2 = (model, accessToken) => {
-  if (model.tags.includes("conversational")) {
-    return {
-      content: `import { HfInference } from "@huggingface/inference";
-
-const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
-const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
-for await (const chunk of inference.chatCompletionStream({
-	model: "${model.id}",
-	messages: [
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": imageUrl}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens: 500,
-})) {
-	process.stdout.write(chunk.choices[0]?.delta?.content || "");
-}`
-    };
-  } else {
-    return snippetBasic3(model, accessToken);
-  }
-};
 var snippetZeroShotClassification3 = (model, accessToken) => ({
   content: `async function query(data) {
 	const response = await fetch(
@@ -7075,7 +7029,7 @@ var jsSnippets = {
   summarization: snippetBasic3,
   "feature-extraction": snippetBasic3,
   "text-generation": snippetTextGeneration2,
-  "image-text-to-text":
+  "image-text-to-text": snippetTextGeneration2,
   "text2text-generation": snippetBasic3,
   "fill-mask": snippetBasic3,
   "sentence-similarity": snippetBasic3,
package/dist/index.js
CHANGED
@@ -6291,7 +6291,6 @@ __export(curl_exports, {
   hasCurlInferenceSnippet: () => hasCurlInferenceSnippet,
   snippetBasic: () => snippetBasic,
   snippetFile: () => snippetFile,
-  snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration,
   snippetTextGeneration: () => snippetTextGeneration,
   snippetZeroShotClassification: () => snippetZeroShotClassification
 });
@@ -6339,9 +6338,21 @@ var snippetBasic = (model, accessToken) => ({
 var snippetTextGeneration = (model, accessToken, opts) => {
   if (model.tags.includes("conversational")) {
     const streaming = opts?.streaming ?? true;
-    const
-    {
+    const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+      {
+        role: "user",
+        content: [
+          {
+            type: "image_url",
+            image_url: {
+              url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+            }
+          },
+          { type: "text", text: "Describe this image in one sentence." }
+        ]
+      }
     ];
+    const messages = opts?.messages ?? exampleMessages;
     const config = {
       ...opts?.temperature ? { temperature: opts.temperature } : void 0,
       max_tokens: opts?.max_tokens ?? 500,
@@ -6376,32 +6387,6 @@ var snippetTextGeneration = (model, accessToken, opts) => {
     return snippetBasic(model, accessToken);
   }
 };
-var snippetImageTextToTextGeneration = (model, accessToken) => {
-  if (model.tags.includes("conversational")) {
-    return {
-      content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
--H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
--H 'Content-Type: application/json' \\
--d '{
-    "model": "${model.id}",
-    "messages": [
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
-                {"type": "text", "text": "Describe this image in one sentence."}
-            ]
-        }
-    ],
-    "max_tokens": 500,
-    "stream": false
-}'
-`
-    };
-  } else {
-    return snippetBasic(model, accessToken);
-  }
-};
 var snippetZeroShotClassification = (model, accessToken) => ({
   content: `curl https://api-inference.huggingface.co/models/${model.id} \\
 -X POST \\
@@ -6426,7 +6411,7 @@ var curlSnippets = {
   summarization: snippetBasic,
   "feature-extraction": snippetBasic,
   "text-generation": snippetTextGeneration,
-  "image-text-to-text":
+  "image-text-to-text": snippetTextGeneration,
   "text2text-generation": snippetBasic,
   "fill-mask": snippetBasic,
   "sentence-similarity": snippetBasic,
@@ -6456,7 +6441,6 @@ __export(python_exports, {
   pythonSnippets: () => pythonSnippets,
   snippetBasic: () => snippetBasic2,
   snippetConversational: () => snippetConversational,
-  snippetConversationalWithImage: () => snippetConversationalWithImage,
   snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
   snippetFile: () => snippetFile2,
   snippetTabular: () => snippetTabular,
@@ -6467,9 +6451,21 @@ __export(python_exports, {
 });
 var snippetConversational = (model, accessToken, opts) => {
   const streaming = opts?.streaming ?? true;
-  const
-  {
+  const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+    {
+      role: "user",
+      content: [
+        {
+          type: "image_url",
+          image_url: {
+            url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+          }
+        },
+        { type: "text", text: "Describe this image in one sentence." }
+      ]
+    }
   ];
+  const messages = opts?.messages ?? exampleMessages;
   const messagesStr = stringifyMessages(messages, {
     sep: ",\n ",
     start: `[
@@ -6571,29 +6567,6 @@ print(completion.choices[0].message)`
     ];
   }
 };
-var snippetConversationalWithImage = (model, accessToken) => ({
-  content: `from huggingface_hub import InferenceClient
-
-client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
-image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
-for message in client.chat_completion(
-	model="${model.id}",
-	messages=[
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": image_url}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens=500,
-	stream=True,
-):
-	print(message.choices[0].delta.content, end="")`
-});
 var snippetZeroShotClassification2 = (model) => ({
   content: `def query(payload):
 	response = requests.post(API_URL, headers=headers, json=payload)
@@ -6711,7 +6684,7 @@ var pythonSnippets = {
   "feature-extraction": snippetBasic2,
   "text-generation": snippetBasic2,
   "text2text-generation": snippetBasic2,
-  "image-text-to-text":
+  "image-text-to-text": snippetConversational,
   "fill-mask": snippetBasic2,
   "sentence-similarity": snippetBasic2,
   "automatic-speech-recognition": snippetFile2,
@@ -6730,10 +6703,8 @@ var pythonSnippets = {
   "zero-shot-image-classification": snippetZeroShotImageClassification
 };
 function getPythonInferenceSnippet(model, accessToken, opts) {
-  if (model.
+  if (model.tags.includes("conversational")) {
     return snippetConversational(model, accessToken, opts);
-  } else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
-    return snippetConversationalWithImage(model, accessToken);
  } else {
     let snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? { content: "" } : { content: "" };
     snippets = Array.isArray(snippets) ? snippets : [snippets];
@@ -6762,7 +6733,6 @@ __export(js_exports, {
   jsSnippets: () => jsSnippets,
   snippetBasic: () => snippetBasic3,
   snippetFile: () => snippetFile3,
-  snippetImageTextToTextGeneration: () => snippetImageTextToTextGeneration2,
   snippetTextGeneration: () => snippetTextGeneration2,
   snippetTextToAudio: () => snippetTextToAudio2,
   snippetTextToImage: () => snippetTextToImage2,
@@ -6792,9 +6762,21 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
 var snippetTextGeneration2 = (model, accessToken, opts) => {
   if (model.tags.includes("conversational")) {
     const streaming = opts?.streaming ?? true;
-    const
-    {
+    const exampleMessages = model.pipeline_tag === "text-generation" ? [{ role: "user", content: "What is the capital of France?" }] : [
+      {
+        role: "user",
+        content: [
+          {
+            type: "image_url",
+            image_url: {
+              url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+            }
+          },
+          { type: "text", text: "Describe this image in one sentence." }
+        ]
+      }
     ];
+    const messages = opts?.messages ?? exampleMessages;
     const messagesStr = stringifyMessages(messages, { sep: ",\n ", start: "[\n ", end: "\n ]" });
     const config = {
       ...opts?.temperature ? { temperature: opts.temperature } : void 0,
@@ -6897,34 +6879,6 @@ console.log(chatCompletion.choices[0].message);`
     return snippetBasic3(model, accessToken);
   }
 };
-var snippetImageTextToTextGeneration2 = (model, accessToken) => {
-  if (model.tags.includes("conversational")) {
-    return {
-      content: `import { HfInference } from "@huggingface/inference";
-
-const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
-const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
-for await (const chunk of inference.chatCompletionStream({
-	model: "${model.id}",
-	messages: [
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": imageUrl}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens: 500,
-})) {
-	process.stdout.write(chunk.choices[0]?.delta?.content || "");
-}`
-    };
-  } else {
-    return snippetBasic3(model, accessToken);
-  }
-};
 var snippetZeroShotClassification3 = (model, accessToken) => ({
   content: `async function query(data) {
 	const response = await fetch(
@@ -7037,7 +6991,7 @@ var jsSnippets = {
   summarization: snippetBasic3,
   "feature-extraction": snippetBasic3,
   "text-generation": snippetTextGeneration2,
-  "image-text-to-text":
+  "image-text-to-text": snippetTextGeneration2,
   "text2text-generation": snippetBasic3,
   "fill-mask": snippetBasic3,
   "sentence-similarity": snippetBasic3,
package/dist/src/snippets/curl.d.ts
CHANGED
@@ -9,7 +9,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
     max_tokens?: GenerationParameters["max_tokens"];
     top_p?: GenerationParameters["top_p"];
 }) => InferenceSnippet;
-export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
package/dist/src/snippets/curl.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,
+{"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAsDF,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAM3F,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAKzE,CAAC;AAEH,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,gBAAgB,CAClG,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,gBAAgB,CAItG;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
package/dist/src/snippets/js.d.ts
CHANGED
@@ -9,7 +9,6 @@ export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToke
     max_tokens?: GenerationParameters["max_tokens"];
     top_p?: GenerationParameters["top_p"];
 }) => InferenceSnippet | InferenceSnippet[];
-export declare const snippetImageTextToTextGeneration: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
package/dist/src/snippets/js.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,
+{"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAoB1E,CAAC;AAEH,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,GAAG,gBAAgB,EA6HrC,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAsB3F,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAmBhF,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAuCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAqBzE,CAAC;AAEH,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,GACjB,gBAAgB,GAAG,gBAAgB,EAAE,CAIvC;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
package/dist/src/snippets/python.d.ts
CHANGED
@@ -8,7 +8,6 @@ export declare const snippetConversational: (model: ModelDataMinimal, accessToke
     max_tokens?: GenerationParameters["max_tokens"];
     top_p?: GenerationParameters["top_p"];
 }) => InferenceSnippet[];
-export declare const snippetConversationalWithImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
 export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => InferenceSnippet;
 export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet;
 export declare const snippetBasic: (model: ModelDataMinimal) => InferenceSnippet;
package/dist/src/snippets/python.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,
+{"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,SACZ;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAwHlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAStE,CAAC;AAEH,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAe3E,CAAC;AAEH,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,gBAQrD,CAAC;AAEH,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAQpD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAW3D,CAAC;AAEH,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAOvD,CAAC;AAEH,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBA+B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAWzE,CAAC;AAEH,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,GAAG,gBAAgB,EAAE,CAC1C,CA8BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,GAAG,gBAAgB,EAAE,CAwBvC;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.12.26",
+  "version": "0.12.27",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
package/src/snippets/curl.ts
CHANGED
@@ -26,9 +26,24 @@ export const snippetTextGeneration = (
 	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		const streaming = opts?.streaming ?? true;
-		const
-
-
+		const exampleMessages: ChatCompletionInputMessage[] =
+			model.pipeline_tag === "text-generation"
+				? [{ role: "user", content: "What is the capital of France?" }]
+				: [
+						{
+							role: "user",
+							content: [
+								{
+									type: "image_url",
+									image_url: {
+										url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+									},
+								},
+								{ type: "text", text: "Describe this image in one sentence." },
+							],
+						},
+				  ];
+		const messages = opts?.messages ?? exampleMessages;
 
 		const config = {
 			...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -63,34 +78,6 @@ export const snippetTextGeneration = (
 	}
 };
 
-export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
-	if (model.tags.includes("conversational")) {
-		// Conversational model detected, so we display a code snippet that features the Messages API
-		return {
-			content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
--H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
--H 'Content-Type: application/json' \\
--d '{
-    "model": "${model.id}",
-    "messages": [
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
-                {"type": "text", "text": "Describe this image in one sentence."}
-            ]
-        }
-    ],
-    "max_tokens": 500,
-    "stream": false
-}'
-`,
-		};
-	} else {
-		return snippetBasic(model, accessToken);
-	}
-};
-
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
 	content: `curl https://api-inference.huggingface.co/models/${model.id} \\
 -X POST \\
@@ -122,7 +109,7 @@ export const curlSnippets: Partial<
 	summarization: snippetBasic,
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetTextGeneration,
-	"image-text-to-text":
+	"image-text-to-text": snippetTextGeneration,
 	"text2text-generation": snippetBasic,
 	"fill-mask": snippetBasic,
 	"sentence-similarity": snippetBasic,
package/src/snippets/js.ts
CHANGED
@@ -40,9 +40,24 @@ export const snippetTextGeneration = (
 	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		const streaming = opts?.streaming ?? true;
-		const
-
-
+		const exampleMessages: ChatCompletionInputMessage[] =
+			model.pipeline_tag === "text-generation"
+				? [{ role: "user", content: "What is the capital of France?" }]
+				: [
+						{
+							role: "user",
+							content: [
+								{
+									type: "image_url",
+									image_url: {
+										url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+									},
+								},
+								{ type: "text", text: "Describe this image in one sentence." },
+							],
+						},
+				  ];
+		const messages = opts?.messages ?? exampleMessages;
 		const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });
 
 		const config = {
@@ -148,36 +163,6 @@ console.log(chatCompletion.choices[0].message);`,
 	}
 };
 
-export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
-	if (model.tags.includes("conversational")) {
-		// Conversational model detected, so we display a code snippet that features the Messages API
-		return {
-			content: `import { HfInference } from "@huggingface/inference";
-
-const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
-const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
-
-for await (const chunk of inference.chatCompletionStream({
-	model: "${model.id}",
-	messages: [
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": imageUrl}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens: 500,
-})) {
-	process.stdout.write(chunk.choices[0]?.delta?.content || "");
-}`,
-		};
-	} else {
-		return snippetBasic(model, accessToken);
-	}
-};
-
 export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
 	content: `async function query(data) {
 	const response = await fetch(
@@ -307,7 +292,7 @@ export const jsSnippets: Partial<
 	summarization: snippetBasic,
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetTextGeneration,
-	"image-text-to-text":
+	"image-text-to-text": snippetTextGeneration,
 	"text2text-generation": snippetBasic,
 	"fill-mask": snippetBasic,
 	"sentence-similarity": snippetBasic,
package/src/snippets/python.ts
CHANGED
@@ -16,9 +16,24 @@ export const snippetConversational = (
 	}
 ): InferenceSnippet[] => {
 	const streaming = opts?.streaming ?? true;
-	const
-
-
+	const exampleMessages: ChatCompletionInputMessage[] =
+		model.pipeline_tag === "text-generation"
+			? [{ role: "user", content: "What is the capital of France?" }]
+			: [
+					{
+						role: "user",
+						content: [
+							{
+								type: "image_url",
+								image_url: {
+									url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+								},
+							},
+							{ type: "text", text: "Describe this image in one sentence." },
+						],
+					},
+			  ];
+	const messages = opts?.messages ?? exampleMessages;
 	const messagesStr = stringifyMessages(messages, {
 		sep: ",\n\t",
 		start: `[\n\t`,
@@ -121,30 +136,6 @@ print(completion.choices[0].message)`,
 	}
 };
 
-export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
-	content: `from huggingface_hub import InferenceClient
-
-client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
-image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
-for message in client.chat_completion(
-	model="${model.id}",
-	messages=[
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": image_url}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens=500,
-	stream=True,
-):
-	print(message.choices[0].delta.content, end="")`,
-});
-
 export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet => ({
 	content: `def query(payload):
 	response = requests.post(API_URL, headers=headers, json=payload)
@@ -282,7 +273,7 @@ export const pythonSnippets: Partial<
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetBasic,
 	"text2text-generation": snippetBasic,
-	"image-text-to-text":
+	"image-text-to-text": snippetConversational,
 	"fill-mask": snippetBasic,
 	"sentence-similarity": snippetBasic,
 	"automatic-speech-recognition": snippetFile,
@@ -306,12 +297,9 @@ export function getPythonInferenceSnippet(
 	accessToken: string,
 	opts?: Record<string, unknown>
 ): InferenceSnippet | InferenceSnippet[] {
-	if (model.
+	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		return snippetConversational(model, accessToken, opts);
-	} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
-		// Example sending an image to the Message API
-		return snippetConversationalWithImage(model, accessToken);
 	} else {
 		let snippets =
 			model.pipeline_tag && model.pipeline_tag in pythonSnippets