@huggingface/tasks 0.12.28 → 0.12.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37):
  1. package/dist/index.cjs +113 -59
  2. package/dist/index.js +108 -58
  3. package/dist/src/gguf.d.ts +35 -0
  4. package/dist/src/gguf.d.ts.map +1 -0
  5. package/dist/src/index.d.ts +1 -0
  6. package/dist/src/index.d.ts.map +1 -1
  7. package/dist/src/local-apps.d.ts.map +1 -1
  8. package/dist/src/model-libraries-snippets.d.ts +1 -0
  9. package/dist/src/model-libraries-snippets.d.ts.map +1 -1
  10. package/dist/src/model-libraries.d.ts +23 -3
  11. package/dist/src/model-libraries.d.ts.map +1 -1
  12. package/dist/src/snippets/common.d.ts +6 -12
  13. package/dist/src/snippets/common.d.ts.map +1 -1
  14. package/dist/src/snippets/curl.d.ts.map +1 -1
  15. package/dist/src/snippets/curl.spec.d.ts +2 -0
  16. package/dist/src/snippets/curl.spec.d.ts.map +1 -0
  17. package/dist/src/snippets/inputs.d.ts.map +1 -1
  18. package/dist/src/snippets/js.d.ts.map +1 -1
  19. package/dist/src/snippets/js.spec.d.ts +2 -0
  20. package/dist/src/snippets/js.spec.d.ts.map +1 -0
  21. package/dist/src/snippets/python.d.ts.map +1 -1
  22. package/dist/src/snippets/python.spec.d.ts +2 -0
  23. package/dist/src/snippets/python.spec.d.ts.map +1 -0
  24. package/package.json +2 -4
  25. package/src/gguf.ts +40 -0
  26. package/src/index.ts +2 -0
  27. package/src/local-apps.ts +1 -1
  28. package/src/model-libraries-snippets.ts +19 -4
  29. package/src/model-libraries.ts +23 -3
  30. package/src/snippets/common.ts +27 -51
  31. package/src/snippets/curl.spec.ts +68 -0
  32. package/src/snippets/curl.ts +2 -6
  33. package/src/snippets/inputs.ts +1 -0
  34. package/src/snippets/js.spec.ts +86 -0
  35. package/src/snippets/js.ts +2 -4
  36. package/src/snippets/python.spec.ts +78 -0
  37. package/src/snippets/python.ts +4 -11
@@ -0,0 +1,86 @@
1
+ import type { InferenceSnippet, ModelDataMinimal } from "./types";
2
+ import { describe, expect, it } from "vitest";
3
+ import { snippetTextGeneration } from "./js";
4
+
5
+ describe("inference API snippets", () => {
6
+ it("conversational llm", async () => {
7
+ const model: ModelDataMinimal = {
8
+ id: "meta-llama/Llama-3.1-8B-Instruct",
9
+ pipeline_tag: "text-generation",
10
+ tags: ["conversational"],
11
+ inference: "",
12
+ };
13
+ const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
14
+
15
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
16
+
17
+ const client = new HfInference("api_token")
18
+
19
+ let out = "";
20
+
21
+ const stream = client.chatCompletionStream({
22
+ model: "meta-llama/Llama-3.1-8B-Instruct",
23
+ messages: [
24
+ {
25
+ role: "user",
26
+ content: "What is the capital of France?"
27
+ }
28
+ ],
29
+ max_tokens: 500
30
+ });
31
+
32
+ for await (const chunk of stream) {
33
+ if (chunk.choices && chunk.choices.length > 0) {
34
+ const newContent = chunk.choices[0].delta.content;
35
+ out += newContent;
36
+ console.log(newContent);
37
+ }
38
+ }`);
39
+ });
40
+
41
+ it("conversational vlm", async () => {
42
+ const model: ModelDataMinimal = {
43
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
44
+ pipeline_tag: "image-text-to-text",
45
+ tags: ["conversational"],
46
+ inference: "",
47
+ };
48
+ const snippet = snippetTextGeneration(model, "api_token") as InferenceSnippet[];
49
+
50
+ expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
51
+
52
+ const client = new HfInference("api_token")
53
+
54
+ let out = "";
55
+
56
+ const stream = client.chatCompletionStream({
57
+ model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
58
+ messages: [
59
+ {
60
+ role: "user",
61
+ content: [
62
+ {
63
+ type: "text",
64
+ text: "Describe this image in one sentence."
65
+ },
66
+ {
67
+ type: "image_url",
68
+ image_url: {
69
+ url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
70
+ }
71
+ }
72
+ ]
73
+ }
74
+ ],
75
+ max_tokens: 500
76
+ });
77
+
78
+ for await (const chunk of stream) {
79
+ if (chunk.choices && chunk.choices.length > 0) {
80
+ const newContent = chunk.choices[0].delta.content;
81
+ out += newContent;
82
+ console.log(newContent);
83
+ }
84
+ }`);
85
+ });
86
+ });
@@ -42,7 +42,7 @@ export const snippetTextGeneration = (
42
42
  const streaming = opts?.streaming ?? true;
43
43
  const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
44
44
  const messages = opts?.messages ?? exampleMessages;
45
- const messagesStr = stringifyMessages(messages, { sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" });
45
+ const messagesStr = stringifyMessages(messages, { indent: "\t" });
46
46
 
47
47
  const config = {
48
48
  ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -50,9 +50,7 @@ export const snippetTextGeneration = (
50
50
  ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
51
51
  };
52
52
  const configStr = stringifyGenerationConfig(config, {
53
- sep: ",\n\t",
54
- start: "",
55
- end: "",
53
+ indent: "\n\t",
56
54
  attributeValueConnector: ": ",
57
55
  });
58
56
 
@@ -0,0 +1,78 @@
1
+ import type { ModelDataMinimal } from "./types";
2
+ import { describe, expect, it } from "vitest";
3
+ import { snippetConversational } from "./python";
4
+
5
+ describe("inference API snippets", () => {
6
+ it("conversational llm", async () => {
7
+ const model: ModelDataMinimal = {
8
+ id: "meta-llama/Llama-3.1-8B-Instruct",
9
+ pipeline_tag: "text-generation",
10
+ tags: ["conversational"],
11
+ inference: "",
12
+ };
13
+ const snippet = snippetConversational(model, "api_token");
14
+
15
+ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
16
+
17
+ client = InferenceClient(api_key="api_token")
18
+
19
+ messages = [
20
+ {
21
+ "role": "user",
22
+ "content": "What is the capital of France?"
23
+ }
24
+ ]
25
+
26
+ stream = client.chat.completions.create(
27
+ model="meta-llama/Llama-3.1-8B-Instruct",
28
+ messages=messages,
29
+ max_tokens=500,
30
+ stream=True
31
+ )
32
+
33
+ for chunk in stream:
34
+ print(chunk.choices[0].delta.content, end="")`);
35
+ });
36
+
37
+ it("conversational vlm", async () => {
38
+ const model: ModelDataMinimal = {
39
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
40
+ pipeline_tag: "image-text-to-text",
41
+ tags: ["conversational"],
42
+ inference: "",
43
+ };
44
+ const snippet = snippetConversational(model, "api_token");
45
+
46
+ expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
47
+
48
+ client = InferenceClient(api_key="api_token")
49
+
50
+ messages = [
51
+ {
52
+ "role": "user",
53
+ "content": [
54
+ {
55
+ "type": "text",
56
+ "text": "Describe this image in one sentence."
57
+ },
58
+ {
59
+ "type": "image_url",
60
+ "image_url": {
61
+ "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
62
+ }
63
+ }
64
+ ]
65
+ }
66
+ ]
67
+
68
+ stream = client.chat.completions.create(
69
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
70
+ messages=messages,
71
+ max_tokens=500,
72
+ stream=True
73
+ )
74
+
75
+ for chunk in stream:
76
+ print(chunk.choices[0].delta.content, end="")`);
77
+ });
78
+ });
@@ -18,12 +18,7 @@ export const snippetConversational = (
18
18
  const streaming = opts?.streaming ?? true;
19
19
  const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
20
20
  const messages = opts?.messages ?? exampleMessages;
21
- const messagesStr = stringifyMessages(messages, {
22
- sep: ",\n\t",
23
- start: `[\n\t`,
24
- end: `\n]`,
25
- attributeKeyQuotes: true,
26
- });
21
+ const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
27
22
 
28
23
  const config = {
29
24
  ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
@@ -31,9 +26,7 @@ export const snippetConversational = (
31
26
  ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
32
27
  };
33
28
  const configStr = stringifyGenerationConfig(config, {
34
- sep: ",\n\t",
35
- start: "",
36
- end: "",
29
+ indent: "\n\t",
37
30
  attributeValueConnector: "=",
38
31
  });
39
32
 
@@ -55,7 +48,7 @@ stream = client.chat.completions.create(
55
48
  )
56
49
 
57
50
  for chunk in stream:
58
- print(chunk.choices[0].delta.content)`,
51
+ print(chunk.choices[0].delta.content, end="")`,
59
52
  },
60
53
  {
61
54
  client: "openai",
@@ -76,7 +69,7 @@ stream = client.chat.completions.create(
76
69
  )
77
70
 
78
71
  for chunk in stream:
79
- print(chunk.choices[0].delta.content)`,
72
+ print(chunk.choices[0].delta.content, end="")`,
80
73
  },
81
74
  ];
82
75
  } else {