@huggingface/tasks 0.13.1-test → 0.13.1-test2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -2
- package/src/dataset-libraries.ts +89 -0
- package/src/default-widget-inputs.ts +718 -0
- package/src/gguf.ts +40 -0
- package/src/hardware.ts +482 -0
- package/src/index.ts +59 -0
- package/src/library-to-tasks.ts +76 -0
- package/src/local-apps.ts +412 -0
- package/src/model-data.ts +149 -0
- package/src/model-libraries-downloads.ts +18 -0
- package/src/model-libraries-snippets.ts +1128 -0
- package/src/model-libraries.ts +820 -0
- package/src/pipelines.ts +698 -0
- package/src/snippets/common.ts +39 -0
- package/src/snippets/curl.spec.ts +94 -0
- package/src/snippets/curl.ts +120 -0
- package/src/snippets/index.ts +7 -0
- package/src/snippets/inputs.ts +167 -0
- package/src/snippets/js.spec.ts +148 -0
- package/src/snippets/js.ts +305 -0
- package/src/snippets/python.spec.ts +144 -0
- package/src/snippets/python.ts +321 -0
- package/src/snippets/types.ts +16 -0
- package/src/tasks/audio-classification/about.md +86 -0
- package/src/tasks/audio-classification/data.ts +81 -0
- package/src/tasks/audio-classification/inference.ts +52 -0
- package/src/tasks/audio-classification/spec/input.json +35 -0
- package/src/tasks/audio-classification/spec/output.json +11 -0
- package/src/tasks/audio-to-audio/about.md +56 -0
- package/src/tasks/audio-to-audio/data.ts +70 -0
- package/src/tasks/automatic-speech-recognition/about.md +90 -0
- package/src/tasks/automatic-speech-recognition/data.ts +82 -0
- package/src/tasks/automatic-speech-recognition/inference.ts +160 -0
- package/src/tasks/automatic-speech-recognition/spec/input.json +35 -0
- package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
- package/src/tasks/chat-completion/inference.ts +322 -0
- package/src/tasks/chat-completion/spec/input.json +350 -0
- package/src/tasks/chat-completion/spec/output.json +206 -0
- package/src/tasks/chat-completion/spec/stream_output.json +213 -0
- package/src/tasks/common-definitions.json +100 -0
- package/src/tasks/depth-estimation/about.md +45 -0
- package/src/tasks/depth-estimation/data.ts +70 -0
- package/src/tasks/depth-estimation/inference.ts +35 -0
- package/src/tasks/depth-estimation/spec/input.json +25 -0
- package/src/tasks/depth-estimation/spec/output.json +16 -0
- package/src/tasks/document-question-answering/about.md +53 -0
- package/src/tasks/document-question-answering/data.ts +85 -0
- package/src/tasks/document-question-answering/inference.ts +110 -0
- package/src/tasks/document-question-answering/spec/input.json +85 -0
- package/src/tasks/document-question-answering/spec/output.json +36 -0
- package/src/tasks/feature-extraction/about.md +72 -0
- package/src/tasks/feature-extraction/data.ts +57 -0
- package/src/tasks/feature-extraction/inference.ts +40 -0
- package/src/tasks/feature-extraction/spec/input.json +47 -0
- package/src/tasks/feature-extraction/spec/output.json +15 -0
- package/src/tasks/fill-mask/about.md +51 -0
- package/src/tasks/fill-mask/data.ts +79 -0
- package/src/tasks/fill-mask/inference.ts +62 -0
- package/src/tasks/fill-mask/spec/input.json +38 -0
- package/src/tasks/fill-mask/spec/output.json +29 -0
- package/src/tasks/image-classification/about.md +50 -0
- package/src/tasks/image-classification/data.ts +88 -0
- package/src/tasks/image-classification/inference.ts +52 -0
- package/src/tasks/image-classification/spec/input.json +35 -0
- package/src/tasks/image-classification/spec/output.json +11 -0
- package/src/tasks/image-feature-extraction/about.md +23 -0
- package/src/tasks/image-feature-extraction/data.ts +59 -0
- package/src/tasks/image-segmentation/about.md +63 -0
- package/src/tasks/image-segmentation/data.ts +99 -0
- package/src/tasks/image-segmentation/inference.ts +69 -0
- package/src/tasks/image-segmentation/spec/input.json +45 -0
- package/src/tasks/image-segmentation/spec/output.json +26 -0
- package/src/tasks/image-text-to-text/about.md +76 -0
- package/src/tasks/image-text-to-text/data.ts +102 -0
- package/src/tasks/image-to-3d/about.md +62 -0
- package/src/tasks/image-to-3d/data.ts +75 -0
- package/src/tasks/image-to-image/about.md +129 -0
- package/src/tasks/image-to-image/data.ts +101 -0
- package/src/tasks/image-to-image/inference.ts +68 -0
- package/src/tasks/image-to-image/spec/input.json +55 -0
- package/src/tasks/image-to-image/spec/output.json +12 -0
- package/src/tasks/image-to-text/about.md +61 -0
- package/src/tasks/image-to-text/data.ts +82 -0
- package/src/tasks/image-to-text/inference.ts +143 -0
- package/src/tasks/image-to-text/spec/input.json +34 -0
- package/src/tasks/image-to-text/spec/output.json +14 -0
- package/src/tasks/index.ts +312 -0
- package/src/tasks/keypoint-detection/about.md +57 -0
- package/src/tasks/keypoint-detection/data.ts +50 -0
- package/src/tasks/mask-generation/about.md +65 -0
- package/src/tasks/mask-generation/data.ts +55 -0
- package/src/tasks/object-detection/about.md +37 -0
- package/src/tasks/object-detection/data.ts +86 -0
- package/src/tasks/object-detection/inference.ts +75 -0
- package/src/tasks/object-detection/spec/input.json +31 -0
- package/src/tasks/object-detection/spec/output.json +50 -0
- package/src/tasks/placeholder/about.md +15 -0
- package/src/tasks/placeholder/data.ts +21 -0
- package/src/tasks/placeholder/spec/input.json +35 -0
- package/src/tasks/placeholder/spec/output.json +17 -0
- package/src/tasks/question-answering/about.md +56 -0
- package/src/tasks/question-answering/data.ts +75 -0
- package/src/tasks/question-answering/inference.ts +99 -0
- package/src/tasks/question-answering/spec/input.json +67 -0
- package/src/tasks/question-answering/spec/output.json +29 -0
- package/src/tasks/reinforcement-learning/about.md +167 -0
- package/src/tasks/reinforcement-learning/data.ts +75 -0
- package/src/tasks/sentence-similarity/about.md +97 -0
- package/src/tasks/sentence-similarity/data.ts +101 -0
- package/src/tasks/sentence-similarity/inference.ts +32 -0
- package/src/tasks/sentence-similarity/spec/input.json +40 -0
- package/src/tasks/sentence-similarity/spec/output.json +12 -0
- package/src/tasks/summarization/about.md +58 -0
- package/src/tasks/summarization/data.ts +76 -0
- package/src/tasks/summarization/inference.ts +57 -0
- package/src/tasks/summarization/spec/input.json +42 -0
- package/src/tasks/summarization/spec/output.json +14 -0
- package/src/tasks/table-question-answering/about.md +43 -0
- package/src/tasks/table-question-answering/data.ts +59 -0
- package/src/tasks/table-question-answering/inference.ts +61 -0
- package/src/tasks/table-question-answering/spec/input.json +44 -0
- package/src/tasks/table-question-answering/spec/output.json +40 -0
- package/src/tasks/tabular-classification/about.md +65 -0
- package/src/tasks/tabular-classification/data.ts +68 -0
- package/src/tasks/tabular-regression/about.md +87 -0
- package/src/tasks/tabular-regression/data.ts +57 -0
- package/src/tasks/text-classification/about.md +173 -0
- package/src/tasks/text-classification/data.ts +103 -0
- package/src/tasks/text-classification/inference.ts +51 -0
- package/src/tasks/text-classification/spec/input.json +35 -0
- package/src/tasks/text-classification/spec/output.json +11 -0
- package/src/tasks/text-generation/about.md +154 -0
- package/src/tasks/text-generation/data.ts +114 -0
- package/src/tasks/text-generation/inference.ts +200 -0
- package/src/tasks/text-generation/spec/input.json +219 -0
- package/src/tasks/text-generation/spec/output.json +179 -0
- package/src/tasks/text-generation/spec/stream_output.json +103 -0
- package/src/tasks/text-to-3d/about.md +62 -0
- package/src/tasks/text-to-3d/data.ts +56 -0
- package/src/tasks/text-to-audio/inference.ts +143 -0
- package/src/tasks/text-to-audio/spec/input.json +31 -0
- package/src/tasks/text-to-audio/spec/output.json +17 -0
- package/src/tasks/text-to-image/about.md +96 -0
- package/src/tasks/text-to-image/data.ts +100 -0
- package/src/tasks/text-to-image/inference.ts +75 -0
- package/src/tasks/text-to-image/spec/input.json +63 -0
- package/src/tasks/text-to-image/spec/output.json +13 -0
- package/src/tasks/text-to-speech/about.md +63 -0
- package/src/tasks/text-to-speech/data.ts +79 -0
- package/src/tasks/text-to-speech/inference.ts +145 -0
- package/src/tasks/text-to-speech/spec/input.json +31 -0
- package/src/tasks/text-to-speech/spec/output.json +7 -0
- package/src/tasks/text-to-video/about.md +41 -0
- package/src/tasks/text-to-video/data.ts +102 -0
- package/src/tasks/text2text-generation/inference.ts +55 -0
- package/src/tasks/text2text-generation/spec/input.json +55 -0
- package/src/tasks/text2text-generation/spec/output.json +14 -0
- package/src/tasks/token-classification/about.md +76 -0
- package/src/tasks/token-classification/data.ts +92 -0
- package/src/tasks/token-classification/inference.ts +85 -0
- package/src/tasks/token-classification/spec/input.json +65 -0
- package/src/tasks/token-classification/spec/output.json +37 -0
- package/src/tasks/translation/about.md +65 -0
- package/src/tasks/translation/data.ts +70 -0
- package/src/tasks/translation/inference.ts +67 -0
- package/src/tasks/translation/spec/input.json +50 -0
- package/src/tasks/translation/spec/output.json +14 -0
- package/src/tasks/unconditional-image-generation/about.md +50 -0
- package/src/tasks/unconditional-image-generation/data.ts +72 -0
- package/src/tasks/video-classification/about.md +37 -0
- package/src/tasks/video-classification/data.ts +84 -0
- package/src/tasks/video-classification/inference.ts +59 -0
- package/src/tasks/video-classification/spec/input.json +42 -0
- package/src/tasks/video-classification/spec/output.json +10 -0
- package/src/tasks/video-text-to-text/about.md +98 -0
- package/src/tasks/video-text-to-text/data.ts +66 -0
- package/src/tasks/visual-question-answering/about.md +48 -0
- package/src/tasks/visual-question-answering/data.ts +97 -0
- package/src/tasks/visual-question-answering/inference.ts +62 -0
- package/src/tasks/visual-question-answering/spec/input.json +41 -0
- package/src/tasks/visual-question-answering/spec/output.json +21 -0
- package/src/tasks/zero-shot-classification/about.md +40 -0
- package/src/tasks/zero-shot-classification/data.ts +70 -0
- package/src/tasks/zero-shot-classification/inference.ts +67 -0
- package/src/tasks/zero-shot-classification/spec/input.json +50 -0
- package/src/tasks/zero-shot-classification/spec/output.json +11 -0
- package/src/tasks/zero-shot-image-classification/about.md +75 -0
- package/src/tasks/zero-shot-image-classification/data.ts +84 -0
- package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
- package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
- package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-object-detection/about.md +45 -0
- package/src/tasks/zero-shot-object-detection/data.ts +67 -0
- package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
- package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
- package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
- package/src/tokenizer-data.ts +32 -0
- package/src/widget-example.ts +125 -0
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
|
|
2
|
+
|
|
3
|
+
export function stringifyMessages(
|
|
4
|
+
messages: ChatCompletionInputMessage[],
|
|
5
|
+
opts?: {
|
|
6
|
+
indent?: string;
|
|
7
|
+
attributeKeyQuotes?: boolean;
|
|
8
|
+
customContentEscaper?: (str: string) => string;
|
|
9
|
+
}
|
|
10
|
+
): string {
|
|
11
|
+
let messagesStr = JSON.stringify(messages, null, "\t");
|
|
12
|
+
if (opts?.indent) {
|
|
13
|
+
messagesStr = messagesStr.replaceAll("\n", `\n${opts.indent}`);
|
|
14
|
+
}
|
|
15
|
+
if (!opts?.attributeKeyQuotes) {
|
|
16
|
+
messagesStr = messagesStr.replace(/"([^"]+)":/g, "$1:");
|
|
17
|
+
}
|
|
18
|
+
if (opts?.customContentEscaper) {
|
|
19
|
+
messagesStr = opts.customContentEscaper(messagesStr);
|
|
20
|
+
}
|
|
21
|
+
return messagesStr;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
type PartialGenerationParameters = Partial<Pick<GenerationParameters, "temperature" | "max_tokens" | "top_p">>;
|
|
25
|
+
|
|
26
|
+
export function stringifyGenerationConfig(
|
|
27
|
+
config: PartialGenerationParameters,
|
|
28
|
+
opts: {
|
|
29
|
+
indent: string;
|
|
30
|
+
attributeValueConnector: string;
|
|
31
|
+
attributeKeyQuotes?: boolean;
|
|
32
|
+
}
|
|
33
|
+
): string {
|
|
34
|
+
const quote = opts.attributeKeyQuotes ? `"` : "";
|
|
35
|
+
|
|
36
|
+
return Object.entries(config)
|
|
37
|
+
.map(([key, val]) => `${quote}${key}${quote}${opts.attributeValueConnector}${val}`)
|
|
38
|
+
.join(`,${opts.indent}`);
|
|
39
|
+
}
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import type { ModelDataMinimal } from "./types.js";
|
|
2
|
+
import { describe, expect, it } from "vitest";
|
|
3
|
+
import { getCurlInferenceSnippet } from "./curl.js";
|
|
4
|
+
|
|
5
|
+
// Golden-output tests for the cURL snippet generator: each case builds a
// minimal model descriptor and pins the exact command expected to render.
// NOTE(review): expected-string indentation below reconstructs the generator's
// output format (messages indented with tabs, generation params with 4 spaces)
// — confirm against the rendered snippet if whitespace ever drifts.
describe("inference API snippets", () => {
	it("conversational llm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		// No options passed: streaming defaults to true.
		const snippet = getCurlInferenceSnippet(model, "api_token");

		expect(snippet.content)
			.toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
-H "Authorization: Bearer api_token" \\
-H 'Content-Type: application/json' \\
--data '{
    "model": "meta-llama/Llama-3.1-8B-Instruct",
    "messages": [
	{
		"role": "user",
		"content": "What is the capital of France?"
	}
],
    "max_tokens": 500,
    "stream": true
}'`);
	});

	it("conversational llm non-streaming", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		// Explicitly disable streaming; only the "stream" flag should change.
		const snippet = getCurlInferenceSnippet(model, "api_token", { streaming: false });

		expect(snippet.content)
			.toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
-H "Authorization: Bearer api_token" \\
-H 'Content-Type: application/json' \\
--data '{
    "model": "meta-llama/Llama-3.1-8B-Instruct",
    "messages": [
	{
		"role": "user",
		"content": "What is the capital of France?"
	}
],
    "max_tokens": 500,
    "stream": false
}'`);
	});

	it("conversational vlm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
			pipeline_tag: "image-text-to-text",
			tags: ["conversational"],
			inference: "",
		};
		// Vision-language models get a multimodal (text + image_url) example.
		const snippet = getCurlInferenceSnippet(model, "api_token");

		expect(snippet.content)
			.toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \\
-H "Authorization: Bearer api_token" \\
-H 'Content-Type: application/json' \\
--data '{
    "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "messages": [
	{
		"role": "user",
		"content": [
			{
				"type": "text",
				"text": "Describe this image in one sentence."
			},
			{
				"type": "image_url",
				"image_url": {
					"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
				}
			}
		]
	}
],
    "max_tokens": 500,
    "stream": true
}'`);
	});
});
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
import type { PipelineType } from "../pipelines.js";
|
|
2
|
+
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
|
|
3
|
+
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
|
|
4
|
+
import { getModelInputSnippet } from "./inputs.js";
|
|
5
|
+
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
|
|
6
|
+
|
|
7
|
+
export const snippetBasic = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
|
|
8
|
+
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
|
|
9
|
+
-X POST \\
|
|
10
|
+
-d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
|
|
11
|
+
-H 'Content-Type: application/json' \\
|
|
12
|
+
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
|
|
13
|
+
});
|
|
14
|
+
|
|
15
|
+
/**
 * Build a cURL snippet for text-generation / image-text-to-text models.
 *
 * Conversational models (tag "conversational") get a Messages API
 * (/v1/chat/completions) command with streaming on by default; all other models
 * fall back to the basic /models/{id} call via snippetBasic.
 *
 * @param opts.streaming defaults to true
 * @param opts.messages  overrides the per-pipeline example messages
 */
export const snippetTextGeneration = (
	model: ModelDataMinimal,
	accessToken: string,
	opts?: {
		streaming?: boolean;
		messages?: ChatCompletionInputMessage[];
		temperature?: GenerationParameters["temperature"];
		max_tokens?: GenerationParameters["max_tokens"];
		top_p?: GenerationParameters["top_p"];
	}
): InferenceSnippet => {
	if (model.tags.includes("conversational")) {
		// Conversational model detected, so we display a code snippet that features the Messages API
		const streaming = opts?.streaming ?? true;
		const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
		const messages = opts?.messages ?? exampleMessages;

		const config = {
			// NOTE(review): a temperature/top_p of 0 is dropped by these truthiness
			// checks — confirm that falsy-but-valid values are meant to be omitted.
			...(opts?.temperature ? { temperature: opts.temperature } : undefined),
			max_tokens: opts?.max_tokens ?? 500,
			...(opts?.top_p ? { top_p: opts.top_p } : undefined),
		};
		return {
			// Messages are escaped for embedding inside the shell single-quoted --data
			// payload ('\'' terminates, escapes, and reopens the quote).
			content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
-H 'Content-Type: application/json' \\
--data '{
    "model": "${model.id}",
    "messages": ${stringifyMessages(messages, {
		indent: "\t",
		attributeKeyQuotes: true,
		customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
	})},
    ${stringifyGenerationConfig(config, {
		indent: "\n    ",
		attributeKeyQuotes: true,
		attributeValueConnector: ": ",
	})},
    "stream": ${!!streaming}
}'`,
		};
	} else {
		return snippetBasic(model, accessToken);
	}
};
|
|
60
|
+
|
|
61
|
+
export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
|
|
62
|
+
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
|
|
63
|
+
-X POST \\
|
|
64
|
+
-d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
|
|
65
|
+
-H 'Content-Type: application/json' \\
|
|
66
|
+
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
export const snippetFile = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
|
|
70
|
+
content: `curl https://api-inference.huggingface.co/models/${model.id} \\
|
|
71
|
+
-X POST \\
|
|
72
|
+
--data-binary '@${getModelInputSnippet(model, true, true)}' \\
|
|
73
|
+
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
|
|
74
|
+
});
|
|
75
|
+
|
|
76
|
+
/**
 * Dispatch table: pipeline type -> cURL snippet builder.
 * Pipelines absent from this map have no cURL snippet (see hasCurlInferenceSnippet).
 */
export const curlSnippets: Partial<
	Record<
		PipelineType,
		(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet
	>
> = {
	// Same order as in js/src/lib/interfaces/Types.ts
	"text-classification": snippetBasic,
	"token-classification": snippetBasic,
	"table-question-answering": snippetBasic,
	"question-answering": snippetBasic,
	"zero-shot-classification": snippetZeroShotClassification,
	translation: snippetBasic,
	summarization: snippetBasic,
	"feature-extraction": snippetBasic,
	// Conversational-capable pipelines route through the Messages API builder.
	"text-generation": snippetTextGeneration,
	"image-text-to-text": snippetTextGeneration,
	"text2text-generation": snippetBasic,
	"fill-mask": snippetBasic,
	"sentence-similarity": snippetBasic,
	// Binary-input tasks upload a local file with --data-binary.
	"automatic-speech-recognition": snippetFile,
	"text-to-image": snippetBasic,
	"text-to-speech": snippetBasic,
	"text-to-audio": snippetBasic,
	"audio-to-audio": snippetFile,
	"audio-classification": snippetFile,
	"image-classification": snippetFile,
	"image-to-text": snippetFile,
	"object-detection": snippetFile,
	"image-segmentation": snippetFile,
};
|
|
107
|
+
|
|
108
|
+
export function getCurlInferenceSnippet(
|
|
109
|
+
model: ModelDataMinimal,
|
|
110
|
+
accessToken: string,
|
|
111
|
+
opts?: Record<string, unknown>
|
|
112
|
+
): InferenceSnippet {
|
|
113
|
+
return model.pipeline_tag && model.pipeline_tag in curlSnippets
|
|
114
|
+
? curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
|
|
115
|
+
: { content: "" };
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
export function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean {
|
|
119
|
+
return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
|
|
120
|
+
}
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
import type { PipelineType } from "../pipelines.js";
|
|
2
|
+
import type { ChatCompletionInputMessage } from "../tasks/index.js";
|
|
3
|
+
import type { ModelDataMinimal } from "./types.js";
|
|
4
|
+
|
|
5
|
+
// Canned example inputs, one builder per pipeline type. Each builder returns a
// pre-serialized snippet fragment (embedded quotes included), not raw data —
// the result is spliced verbatim into generated code snippets.

const inputsZeroShotClassification = () =>
	`"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`;

// Russian sample: "My name is Wolfgang and I live in Berlin".
const inputsTranslation = () => `"Меня зовут Вольфганг и я живу в Берлине"`;

const inputsSummarization = () =>
	`"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;

// Multi-line fragments below are JSON-shaped payloads rendered as-is into snippets.
const inputsTableQuestionAnswering = () =>
	`{
		"query": "How many stars does the transformers repository have?",
		"table": {
			"Repository": ["Transformers", "Datasets", "Tokenizers"],
			"Stars": ["36542", "4512", "3934"],
			"Contributors": ["651", "77", "34"],
			"Programming language": [
				"Python",
				"Python",
				"Rust, Python and NodeJS"
			]
		}
	}`;

// Local image path plus a question; also reused for document-question-answering
// (see modelInputSnippets).
const inputsVisualQuestionAnswering = () =>
	`{
		"image": "cat.png",
		"question": "What is in this image?"
	}`;

const inputsQuestionAnswering = () =>
	`{
		"question": "What is my name?",
		"context": "My name is Clara and I live in Berkeley."
	}`;

const inputsTextClassification = () => `"I like you. I love you"`;

const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
|
|
43
|
+
|
|
44
|
+
const inputsTextGeneration = (model: ModelDataMinimal): string | ChatCompletionInputMessage[] => {
|
|
45
|
+
if (model.tags.includes("conversational")) {
|
|
46
|
+
return model.pipeline_tag === "text-generation"
|
|
47
|
+
? [{ role: "user", content: "What is the capital of France?" }]
|
|
48
|
+
: [
|
|
49
|
+
{
|
|
50
|
+
role: "user",
|
|
51
|
+
content: [
|
|
52
|
+
{
|
|
53
|
+
type: "text",
|
|
54
|
+
text: "Describe this image in one sentence.",
|
|
55
|
+
},
|
|
56
|
+
{
|
|
57
|
+
type: "image_url",
|
|
58
|
+
image_url: {
|
|
59
|
+
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
|
|
60
|
+
},
|
|
61
|
+
},
|
|
62
|
+
],
|
|
63
|
+
},
|
|
64
|
+
];
|
|
65
|
+
}
|
|
66
|
+
return `"Can you please let us know more details about your "`;
|
|
67
|
+
};
|
|
68
|
+
|
|
69
|
+
// Example inputs (continued): one-line prompts and placeholder file names for
// binary-input tasks.

const inputsText2TextGeneration = () => `"The answer to the universe is"`;

// Interpolates model.mask_token directly — assumes it is populated for
// fill-mask models (e.g. "[MASK]"); TODO confirm against ModelDataMinimal.
const inputsFillMask = (model: ModelDataMinimal) => `"The answer to the universe is ${model.mask_token}."`;

const inputsSentenceSimilarity = () =>
	`{
		"source_sentence": "That is a happy person",
		"sentences": [
			"That is a happy dog",
			"That is a very happy person",
			"Today is a sunny day"
		]
	}`;

const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;

// Placeholder local file names; file-based snippets upload these with --data-binary.
const inputsImageClassification = () => `"cats.jpg"`;

const inputsImageToText = () => `"cats.jpg"`;

const inputsImageSegmentation = () => `"cats.jpg"`;

const inputsObjectDetection = () => `"cats.jpg"`;

const inputsAudioToAudio = () => `"sample1.flac"`;

const inputsAudioClassification = () => `"sample1.flac"`;

const inputsTextToImage = () => `"Astronaut riding a horse"`;

const inputsTextToSpeech = () => `"The answer to the universe is 42"`;

const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`;

const inputsAutomaticSpeechRecognition = () => `"sample1.flac"`;

// Shared by tabular-regression and tabular-classification; already wrapped in
// single quotes for direct use in shell snippets.
const inputsTabularPrediction = () =>
	`'{"Height":[11.52,12.48],"Length1":[23.2,24.0],"Length2":[25.4,26.3],"Species": ["Bream","Bream"]}'`;

const inputsZeroShotImageClassification = () => `"cats.jpg"`;
|
|
109
|
+
|
|
110
|
+
// Dispatch table: pipeline type -> example-input builder. Pipelines not listed
// here fall through to the default message in getModelInputSnippet.
const modelInputSnippets: {
	[key in PipelineType]?: (model: ModelDataMinimal) => string | ChatCompletionInputMessage[];
} = {
	"audio-to-audio": inputsAudioToAudio,
	"audio-classification": inputsAudioClassification,
	"automatic-speech-recognition": inputsAutomaticSpeechRecognition,
	// Document QA reuses the visual-QA example (image + question).
	"document-question-answering": inputsVisualQuestionAnswering,
	"feature-extraction": inputsFeatureExtraction,
	"fill-mask": inputsFillMask,
	"image-classification": inputsImageClassification,
	"image-to-text": inputsImageToText,
	"image-segmentation": inputsImageSegmentation,
	"object-detection": inputsObjectDetection,
	"question-answering": inputsQuestionAnswering,
	"sentence-similarity": inputsSentenceSimilarity,
	summarization: inputsSummarization,
	"table-question-answering": inputsTableQuestionAnswering,
	// Both tabular tasks share the same serialized example.
	"tabular-regression": inputsTabularPrediction,
	"tabular-classification": inputsTabularPrediction,
	"text-classification": inputsTextClassification,
	// Conversational-aware builder: returns message arrays for chat models.
	"text-generation": inputsTextGeneration,
	"image-text-to-text": inputsTextGeneration,
	"text-to-image": inputsTextToImage,
	"text-to-speech": inputsTextToSpeech,
	"text-to-audio": inputsTextToAudio,
	"text2text-generation": inputsText2TextGeneration,
	"token-classification": inputsTokenClassification,
	translation: inputsTranslation,
	"zero-shot-classification": inputsZeroShotClassification,
	"zero-shot-image-classification": inputsZeroShotImageClassification,
};
|
|
141
|
+
|
|
142
|
+
// Use noWrap to put the whole snippet on a single line (removing new lines and tabulations)
|
|
143
|
+
// Use noQuotes to strip quotes from start & end (example: "abc" -> abc)
|
|
144
|
+
export function getModelInputSnippet(
|
|
145
|
+
model: ModelDataMinimal,
|
|
146
|
+
noWrap = false,
|
|
147
|
+
noQuotes = false
|
|
148
|
+
): string | ChatCompletionInputMessage[] {
|
|
149
|
+
if (model.pipeline_tag) {
|
|
150
|
+
const inputs = modelInputSnippets[model.pipeline_tag];
|
|
151
|
+
if (inputs) {
|
|
152
|
+
let result = inputs(model);
|
|
153
|
+
if (typeof result === "string") {
|
|
154
|
+
if (noWrap) {
|
|
155
|
+
result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
|
|
156
|
+
}
|
|
157
|
+
if (noQuotes) {
|
|
158
|
+
const REGEX_QUOTES = /^"(.+)"$/s;
|
|
159
|
+
const match = result.match(REGEX_QUOTES);
|
|
160
|
+
result = match ? match[1] : result;
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
return result;
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
return "No input example has been defined for this model task.";
|
|
167
|
+
}
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
|
|
2
|
+
import { describe, expect, it } from "vitest";
|
|
3
|
+
import { getJsInferenceSnippet } from "./js.js";
|
|
4
|
+
|
|
5
|
+
// Golden-output tests for the JS (HfInference client) snippet generator.
// NOTE(review): the final "conversational llm" case duplicates the first test
// verbatim (same name, same body) — likely a copy-paste leftover; consider
// removing it or renaming it to cover a distinct scenario.
describe("inference API snippets", () => {
	it("conversational llm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		// Streaming defaults to true: expect the chatCompletionStream variant.
		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

const client = new HfInference("api_token")

let out = "";

const stream = client.chatCompletionStream({
	model: "meta-llama/Llama-3.1-8B-Instruct",
	messages: [
		{
			role: "user",
			content: "What is the capital of France?"
		}
	],
	max_tokens: 500
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}`);
	});

	it("conversational llm non-streaming", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		// streaming: false switches to the one-shot chatCompletion call.
		const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

const client = new HfInference("api_token")

const chatCompletion = await client.chatCompletion({
	model: "meta-llama/Llama-3.1-8B-Instruct",
	messages: [
		{
			role: "user",
			content: "What is the capital of France?"
		}
	],
	max_tokens: 500
});

console.log(chatCompletion.choices[0].message);`);
	});

	it("conversational vlm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
			pipeline_tag: "image-text-to-text",
			tags: ["conversational"],
			inference: "",
		};
		// Vision-language model: multimodal (text + image_url) message content.
		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

const client = new HfInference("api_token")

let out = "";

const stream = client.chatCompletionStream({
	model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
	messages: [
		{
			role: "user",
			content: [
				{
					type: "text",
					text: "Describe this image in one sentence."
				},
				{
					type: "image_url",
					image_url: {
						url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
					}
				}
			]
		}
	],
	max_tokens: 500
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}`);
	});

	// NOTE(review): duplicate of the first test (see header comment).
	it("conversational llm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"

const client = new HfInference("api_token")

let out = "";

const stream = client.chatCompletionStream({
	model: "meta-llama/Llama-3.1-8B-Instruct",
	messages: [
		{
			role: "user",
			content: "What is the capital of France?"
		}
	],
	max_tokens: 500
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}`);
	});
});
|