@huggingface/tasks 0.13.5 → 0.13.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (197)
  1. package/dist/commonjs/hardware.d.ts +12 -0
  2. package/dist/commonjs/hardware.d.ts.map +1 -1
  3. package/dist/commonjs/hardware.js +15 -3
  4. package/dist/commonjs/snippets/curl.js +4 -4
  5. package/dist/commonjs/snippets/js.js +5 -5
  6. package/dist/commonjs/tasks/audio-classification/inference.d.ts +1 -3
  7. package/dist/commonjs/tasks/audio-classification/inference.d.ts.map +1 -1
  8. package/dist/commonjs/tasks/automatic-speech-recognition/inference.d.ts +1 -5
  9. package/dist/commonjs/tasks/automatic-speech-recognition/inference.d.ts.map +1 -1
  10. package/dist/commonjs/tasks/chat-completion/inference.d.ts +18 -6
  11. package/dist/commonjs/tasks/chat-completion/inference.d.ts.map +1 -1
  12. package/dist/commonjs/tasks/depth-estimation/inference.d.ts +1 -1
  13. package/dist/commonjs/tasks/document-question-answering/inference.d.ts +1 -3
  14. package/dist/commonjs/tasks/document-question-answering/inference.d.ts.map +1 -1
  15. package/dist/commonjs/tasks/feature-extraction/inference.d.ts +1 -1
  16. package/dist/commonjs/tasks/fill-mask/inference.d.ts +1 -3
  17. package/dist/commonjs/tasks/fill-mask/inference.d.ts.map +1 -1
  18. package/dist/commonjs/tasks/image-classification/inference.d.ts +1 -3
  19. package/dist/commonjs/tasks/image-classification/inference.d.ts.map +1 -1
  20. package/dist/commonjs/tasks/image-segmentation/inference.d.ts +1 -3
  21. package/dist/commonjs/tasks/image-segmentation/inference.d.ts.map +1 -1
  22. package/dist/commonjs/tasks/image-to-image/inference.d.ts +1 -3
  23. package/dist/commonjs/tasks/image-to-image/inference.d.ts.map +1 -1
  24. package/dist/commonjs/tasks/image-to-text/inference.d.ts +1 -5
  25. package/dist/commonjs/tasks/image-to-text/inference.d.ts.map +1 -1
  26. package/dist/commonjs/tasks/object-detection/inference.d.ts +1 -3
  27. package/dist/commonjs/tasks/object-detection/inference.d.ts.map +1 -1
  28. package/dist/commonjs/tasks/question-answering/inference.d.ts +1 -3
  29. package/dist/commonjs/tasks/question-answering/inference.d.ts.map +1 -1
  30. package/dist/commonjs/tasks/sentence-similarity/inference.d.ts +1 -1
  31. package/dist/commonjs/tasks/summarization/inference.d.ts +1 -3
  32. package/dist/commonjs/tasks/summarization/inference.d.ts.map +1 -1
  33. package/dist/commonjs/tasks/table-question-answering/inference.d.ts +1 -1
  34. package/dist/commonjs/tasks/text-classification/inference.d.ts +1 -3
  35. package/dist/commonjs/tasks/text-classification/inference.d.ts.map +1 -1
  36. package/dist/commonjs/tasks/text-to-audio/inference.d.ts +1 -5
  37. package/dist/commonjs/tasks/text-to-audio/inference.d.ts.map +1 -1
  38. package/dist/commonjs/tasks/text-to-image/inference.d.ts +1 -3
  39. package/dist/commonjs/tasks/text-to-image/inference.d.ts.map +1 -1
  40. package/dist/commonjs/tasks/text-to-speech/inference.d.ts +1 -5
  41. package/dist/commonjs/tasks/text-to-speech/inference.d.ts.map +1 -1
  42. package/dist/commonjs/tasks/text2text-generation/inference.d.ts +1 -3
  43. package/dist/commonjs/tasks/text2text-generation/inference.d.ts.map +1 -1
  44. package/dist/commonjs/tasks/token-classification/inference.d.ts +1 -3
  45. package/dist/commonjs/tasks/token-classification/inference.d.ts.map +1 -1
  46. package/dist/commonjs/tasks/translation/inference.d.ts +1 -3
  47. package/dist/commonjs/tasks/translation/inference.d.ts.map +1 -1
  48. package/dist/commonjs/tasks/video-classification/inference.d.ts +1 -3
  49. package/dist/commonjs/tasks/video-classification/inference.d.ts.map +1 -1
  50. package/dist/commonjs/tasks/visual-question-answering/inference.d.ts +1 -3
  51. package/dist/commonjs/tasks/visual-question-answering/inference.d.ts.map +1 -1
  52. package/dist/commonjs/tasks/zero-shot-classification/inference.d.ts +1 -3
  53. package/dist/commonjs/tasks/zero-shot-classification/inference.d.ts.map +1 -1
  54. package/dist/commonjs/tasks/zero-shot-image-classification/inference.d.ts +1 -3
  55. package/dist/commonjs/tasks/zero-shot-image-classification/inference.d.ts.map +1 -1
  56. package/dist/commonjs/tasks/zero-shot-object-detection/inference.d.ts +1 -3
  57. package/dist/commonjs/tasks/zero-shot-object-detection/inference.d.ts.map +1 -1
  58. package/dist/esm/hardware.d.ts +12 -0
  59. package/dist/esm/hardware.d.ts.map +1 -1
  60. package/dist/esm/hardware.js +15 -3
  61. package/dist/esm/snippets/curl.js +4 -4
  62. package/dist/esm/snippets/js.js +5 -5
  63. package/dist/esm/tasks/audio-classification/inference.d.ts +1 -3
  64. package/dist/esm/tasks/audio-classification/inference.d.ts.map +1 -1
  65. package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts +1 -5
  66. package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts.map +1 -1
  67. package/dist/esm/tasks/chat-completion/inference.d.ts +18 -6
  68. package/dist/esm/tasks/chat-completion/inference.d.ts.map +1 -1
  69. package/dist/esm/tasks/depth-estimation/inference.d.ts +1 -1
  70. package/dist/esm/tasks/document-question-answering/inference.d.ts +1 -3
  71. package/dist/esm/tasks/document-question-answering/inference.d.ts.map +1 -1
  72. package/dist/esm/tasks/feature-extraction/inference.d.ts +1 -1
  73. package/dist/esm/tasks/fill-mask/inference.d.ts +1 -3
  74. package/dist/esm/tasks/fill-mask/inference.d.ts.map +1 -1
  75. package/dist/esm/tasks/image-classification/inference.d.ts +1 -3
  76. package/dist/esm/tasks/image-classification/inference.d.ts.map +1 -1
  77. package/dist/esm/tasks/image-segmentation/inference.d.ts +1 -3
  78. package/dist/esm/tasks/image-segmentation/inference.d.ts.map +1 -1
  79. package/dist/esm/tasks/image-to-image/inference.d.ts +1 -3
  80. package/dist/esm/tasks/image-to-image/inference.d.ts.map +1 -1
  81. package/dist/esm/tasks/image-to-text/inference.d.ts +1 -5
  82. package/dist/esm/tasks/image-to-text/inference.d.ts.map +1 -1
  83. package/dist/esm/tasks/object-detection/inference.d.ts +1 -3
  84. package/dist/esm/tasks/object-detection/inference.d.ts.map +1 -1
  85. package/dist/esm/tasks/question-answering/inference.d.ts +1 -3
  86. package/dist/esm/tasks/question-answering/inference.d.ts.map +1 -1
  87. package/dist/esm/tasks/sentence-similarity/inference.d.ts +1 -1
  88. package/dist/esm/tasks/summarization/inference.d.ts +1 -3
  89. package/dist/esm/tasks/summarization/inference.d.ts.map +1 -1
  90. package/dist/esm/tasks/table-question-answering/inference.d.ts +1 -1
  91. package/dist/esm/tasks/text-classification/inference.d.ts +1 -3
  92. package/dist/esm/tasks/text-classification/inference.d.ts.map +1 -1
  93. package/dist/esm/tasks/text-to-audio/inference.d.ts +1 -5
  94. package/dist/esm/tasks/text-to-audio/inference.d.ts.map +1 -1
  95. package/dist/esm/tasks/text-to-image/inference.d.ts +1 -3
  96. package/dist/esm/tasks/text-to-image/inference.d.ts.map +1 -1
  97. package/dist/esm/tasks/text-to-speech/inference.d.ts +1 -5
  98. package/dist/esm/tasks/text-to-speech/inference.d.ts.map +1 -1
  99. package/dist/esm/tasks/text2text-generation/inference.d.ts +1 -3
  100. package/dist/esm/tasks/text2text-generation/inference.d.ts.map +1 -1
  101. package/dist/esm/tasks/token-classification/inference.d.ts +1 -3
  102. package/dist/esm/tasks/token-classification/inference.d.ts.map +1 -1
  103. package/dist/esm/tasks/translation/inference.d.ts +1 -3
  104. package/dist/esm/tasks/translation/inference.d.ts.map +1 -1
  105. package/dist/esm/tasks/video-classification/inference.d.ts +1 -3
  106. package/dist/esm/tasks/video-classification/inference.d.ts.map +1 -1
  107. package/dist/esm/tasks/visual-question-answering/inference.d.ts +1 -3
  108. package/dist/esm/tasks/visual-question-answering/inference.d.ts.map +1 -1
  109. package/dist/esm/tasks/zero-shot-classification/inference.d.ts +1 -3
  110. package/dist/esm/tasks/zero-shot-classification/inference.d.ts.map +1 -1
  111. package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts +1 -3
  112. package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts.map +1 -1
  113. package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts +1 -3
  114. package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts.map +1 -1
  115. package/package.json +1 -1
  116. package/src/hardware.ts +15 -3
  117. package/src/snippets/curl.ts +4 -4
  118. package/src/snippets/js.ts +5 -5
  119. package/src/tasks/audio-classification/inference.ts +1 -3
  120. package/src/tasks/audio-classification/spec/input.json +1 -2
  121. package/src/tasks/automatic-speech-recognition/inference.ts +1 -5
  122. package/src/tasks/automatic-speech-recognition/spec/input.json +1 -2
  123. package/src/tasks/chat-completion/inference.ts +19 -6
  124. package/src/tasks/chat-completion/spec/input.json +14 -19
  125. package/src/tasks/common-definitions.json +0 -1
  126. package/src/tasks/depth-estimation/inference.ts +1 -1
  127. package/src/tasks/depth-estimation/spec/input.json +1 -2
  128. package/src/tasks/document-question-answering/inference.ts +1 -3
  129. package/src/tasks/document-question-answering/spec/input.json +1 -2
  130. package/src/tasks/feature-extraction/inference.ts +1 -1
  131. package/src/tasks/feature-extraction/spec/input.json +1 -1
  132. package/src/tasks/fill-mask/inference.ts +1 -3
  133. package/src/tasks/fill-mask/spec/input.json +1 -2
  134. package/src/tasks/image-classification/inference.ts +1 -3
  135. package/src/tasks/image-classification/spec/input.json +1 -2
  136. package/src/tasks/image-segmentation/inference.ts +1 -3
  137. package/src/tasks/image-segmentation/spec/input.json +1 -2
  138. package/src/tasks/image-to-image/inference.ts +1 -3
  139. package/src/tasks/image-to-image/spec/input.json +1 -2
  140. package/src/tasks/image-to-text/inference.ts +1 -5
  141. package/src/tasks/image-to-text/spec/input.json +1 -2
  142. package/src/tasks/object-detection/inference.ts +1 -3
  143. package/src/tasks/object-detection/spec/input.json +1 -2
  144. package/src/tasks/placeholder/spec/input.json +1 -2
  145. package/src/tasks/question-answering/inference.ts +1 -3
  146. package/src/tasks/question-answering/spec/input.json +1 -2
  147. package/src/tasks/sentence-similarity/inference.ts +1 -1
  148. package/src/tasks/sentence-similarity/spec/input.json +1 -2
  149. package/src/tasks/summarization/inference.ts +1 -3
  150. package/src/tasks/summarization/spec/input.json +1 -2
  151. package/src/tasks/table-question-answering/inference.ts +1 -1
  152. package/src/tasks/table-question-answering/spec/input.json +1 -2
  153. package/src/tasks/text-classification/inference.ts +1 -3
  154. package/src/tasks/text-classification/spec/input.json +1 -2
  155. package/src/tasks/text-to-audio/inference.ts +1 -5
  156. package/src/tasks/text-to-audio/spec/input.json +1 -2
  157. package/src/tasks/text-to-image/inference.ts +1 -3
  158. package/src/tasks/text-to-image/spec/input.json +1 -2
  159. package/src/tasks/text-to-speech/inference.ts +1 -5
  160. package/src/tasks/text-to-speech/spec/input.json +1 -2
  161. package/src/tasks/text2text-generation/inference.ts +1 -3
  162. package/src/tasks/text2text-generation/spec/input.json +1 -2
  163. package/src/tasks/token-classification/inference.ts +1 -3
  164. package/src/tasks/token-classification/spec/input.json +1 -2
  165. package/src/tasks/translation/inference.ts +1 -3
  166. package/src/tasks/translation/spec/input.json +1 -2
  167. package/src/tasks/video-classification/inference.ts +1 -3
  168. package/src/tasks/video-classification/spec/input.json +1 -2
  169. package/src/tasks/visual-question-answering/inference.ts +1 -3
  170. package/src/tasks/visual-question-answering/spec/input.json +1 -2
  171. package/src/tasks/zero-shot-classification/inference.ts +1 -3
  172. package/src/tasks/zero-shot-classification/spec/input.json +1 -2
  173. package/src/tasks/zero-shot-image-classification/inference.ts +1 -3
  174. package/src/tasks/zero-shot-image-classification/spec/input.json +1 -2
  175. package/src/tasks/zero-shot-object-detection/inference.ts +1 -3
  176. package/src/tasks/zero-shot-object-detection/spec/input.json +1 -2
  177. package/dist/commonjs/snippets/curl.spec.d.ts +0 -2
  178. package/dist/commonjs/snippets/curl.spec.d.ts.map +0 -1
  179. package/dist/commonjs/snippets/curl.spec.js +0 -89
  180. package/dist/commonjs/snippets/js.spec.d.ts +0 -2
  181. package/dist/commonjs/snippets/js.spec.d.ts.map +0 -1
  182. package/dist/commonjs/snippets/js.spec.js +0 -141
  183. package/dist/commonjs/snippets/python.spec.d.ts +0 -2
  184. package/dist/commonjs/snippets/python.spec.d.ts.map +0 -1
  185. package/dist/commonjs/snippets/python.spec.js +0 -135
  186. package/dist/esm/snippets/curl.spec.d.ts +0 -2
  187. package/dist/esm/snippets/curl.spec.d.ts.map +0 -1
  188. package/dist/esm/snippets/curl.spec.js +0 -87
  189. package/dist/esm/snippets/js.spec.d.ts +0 -2
  190. package/dist/esm/snippets/js.spec.d.ts.map +0 -1
  191. package/dist/esm/snippets/js.spec.js +0 -139
  192. package/dist/esm/snippets/python.spec.d.ts +0 -2
  193. package/dist/esm/snippets/python.spec.d.ts.map +0 -1
  194. package/dist/esm/snippets/python.spec.js +0 -133
  195. package/src/snippets/curl.spec.ts +0 -94
  196. package/src/snippets/js.spec.ts +0 -148
  197. package/src/snippets/python.spec.ts +0 -144
@@ -1,141 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- const vitest_1 = require("vitest");
4
- const js_js_1 = require("./js.js");
5
- (0, vitest_1.describe)("inference API snippets", () => {
6
- (0, vitest_1.it)("conversational llm", async () => {
7
- const model = {
8
- id: "meta-llama/Llama-3.1-8B-Instruct",
9
- pipeline_tag: "text-generation",
10
- tags: ["conversational"],
11
- inference: "",
12
- };
13
- const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
14
- (0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
15
-
16
- const client = new HfInference("api_token");
17
-
18
- let out = "";
19
-
20
- const stream = client.chatCompletionStream({
21
- model: "meta-llama/Llama-3.1-8B-Instruct",
22
- messages: [
23
- {
24
- role: "user",
25
- content: "What is the capital of France?"
26
- }
27
- ],
28
- max_tokens: 500
29
- });
30
-
31
- for await (const chunk of stream) {
32
- if (chunk.choices && chunk.choices.length > 0) {
33
- const newContent = chunk.choices[0].delta.content;
34
- out += newContent;
35
- console.log(newContent);
36
- }
37
- }`);
38
- });
39
- (0, vitest_1.it)("conversational llm non-streaming", async () => {
40
- const model = {
41
- id: "meta-llama/Llama-3.1-8B-Instruct",
42
- pipeline_tag: "text-generation",
43
- tags: ["conversational"],
44
- inference: "",
45
- };
46
- const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token", { streaming: false });
47
- (0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
48
-
49
- const client = new HfInference("api_token");
50
-
51
- const chatCompletion = await client.chatCompletion({
52
- model: "meta-llama/Llama-3.1-8B-Instruct",
53
- messages: [
54
- {
55
- role: "user",
56
- content: "What is the capital of France?"
57
- }
58
- ],
59
- max_tokens: 500
60
- });
61
-
62
- console.log(chatCompletion.choices[0].message);`);
63
- });
64
- (0, vitest_1.it)("conversational vlm", async () => {
65
- const model = {
66
- id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
67
- pipeline_tag: "image-text-to-text",
68
- tags: ["conversational"],
69
- inference: "",
70
- };
71
- const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
72
- (0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
73
-
74
- const client = new HfInference("api_token");
75
-
76
- let out = "";
77
-
78
- const stream = client.chatCompletionStream({
79
- model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
80
- messages: [
81
- {
82
- role: "user",
83
- content: [
84
- {
85
- type: "text",
86
- text: "Describe this image in one sentence."
87
- },
88
- {
89
- type: "image_url",
90
- image_url: {
91
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
92
- }
93
- }
94
- ]
95
- }
96
- ],
97
- max_tokens: 500
98
- });
99
-
100
- for await (const chunk of stream) {
101
- if (chunk.choices && chunk.choices.length > 0) {
102
- const newContent = chunk.choices[0].delta.content;
103
- out += newContent;
104
- console.log(newContent);
105
- }
106
- }`);
107
- });
108
- (0, vitest_1.it)("conversational llm", async () => {
109
- const model = {
110
- id: "meta-llama/Llama-3.1-8B-Instruct",
111
- pipeline_tag: "text-generation",
112
- tags: ["conversational"],
113
- inference: "",
114
- };
115
- const snippet = (0, js_js_1.getJsInferenceSnippet)(model, "api_token");
116
- (0, vitest_1.expect)(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
117
-
118
- const client = new HfInference("api_token");
119
-
120
- let out = "";
121
-
122
- const stream = client.chatCompletionStream({
123
- model: "meta-llama/Llama-3.1-8B-Instruct",
124
- messages: [
125
- {
126
- role: "user",
127
- content: "What is the capital of France?"
128
- }
129
- ],
130
- max_tokens: 500
131
- });
132
-
133
- for await (const chunk of stream) {
134
- if (chunk.choices && chunk.choices.length > 0) {
135
- const newContent = chunk.choices[0].delta.content;
136
- out += newContent;
137
- console.log(newContent);
138
- }
139
- }`);
140
- });
141
- });
@@ -1,2 +0,0 @@
1
- export {};
2
- //# sourceMappingURL=python.spec.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"python.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.spec.ts"],"names":[],"mappings":""}
@@ -1,135 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- const vitest_1 = require("vitest");
4
- const python_js_1 = require("./python.js");
5
- (0, vitest_1.describe)("inference API snippets", () => {
6
- (0, vitest_1.it)("conversational llm", async () => {
7
- const model = {
8
- id: "meta-llama/Llama-3.1-8B-Instruct",
9
- pipeline_tag: "text-generation",
10
- tags: ["conversational"],
11
- inference: "",
12
- };
13
- const snippet = (0, python_js_1.getPythonInferenceSnippet)(model, "api_token");
14
- (0, vitest_1.expect)(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
15
-
16
- client = InferenceClient(api_key="api_token")
17
-
18
- messages = [
19
- {
20
- "role": "user",
21
- "content": "What is the capital of France?"
22
- }
23
- ]
24
-
25
- stream = client.chat.completions.create(
26
- model="meta-llama/Llama-3.1-8B-Instruct",
27
- messages=messages,
28
- max_tokens=500,
29
- stream=True
30
- )
31
-
32
- for chunk in stream:
33
- print(chunk.choices[0].delta.content, end="")`);
34
- });
35
- (0, vitest_1.it)("conversational llm non-streaming", async () => {
36
- const model = {
37
- id: "meta-llama/Llama-3.1-8B-Instruct",
38
- pipeline_tag: "text-generation",
39
- tags: ["conversational"],
40
- inference: "",
41
- };
42
- const snippet = (0, python_js_1.getPythonInferenceSnippet)(model, "api_token", { streaming: false });
43
- (0, vitest_1.expect)(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
44
-
45
- client = InferenceClient(api_key="api_token")
46
-
47
- messages = [
48
- {
49
- "role": "user",
50
- "content": "What is the capital of France?"
51
- }
52
- ]
53
-
54
- completion = client.chat.completions.create(
55
- model="meta-llama/Llama-3.1-8B-Instruct",
56
- messages=messages,
57
- max_tokens=500
58
- )
59
-
60
- print(completion.choices[0].message)`);
61
- });
62
- (0, vitest_1.it)("conversational vlm", async () => {
63
- const model = {
64
- id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
65
- pipeline_tag: "image-text-to-text",
66
- tags: ["conversational"],
67
- inference: "",
68
- };
69
- const snippet = (0, python_js_1.getPythonInferenceSnippet)(model, "api_token");
70
- (0, vitest_1.expect)(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
71
-
72
- client = InferenceClient(api_key="api_token")
73
-
74
- messages = [
75
- {
76
- "role": "user",
77
- "content": [
78
- {
79
- "type": "text",
80
- "text": "Describe this image in one sentence."
81
- },
82
- {
83
- "type": "image_url",
84
- "image_url": {
85
- "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
86
- }
87
- }
88
- ]
89
- }
90
- ]
91
-
92
- stream = client.chat.completions.create(
93
- model="meta-llama/Llama-3.2-11B-Vision-Instruct",
94
- messages=messages,
95
- max_tokens=500,
96
- stream=True
97
- )
98
-
99
- for chunk in stream:
100
- print(chunk.choices[0].delta.content, end="")`);
101
- });
102
- (0, vitest_1.it)("text-to-image", async () => {
103
- const model = {
104
- id: "black-forest-labs/FLUX.1-schnell",
105
- pipeline_tag: "text-to-image",
106
- tags: [],
107
- inference: "",
108
- };
109
- const snippets = (0, python_js_1.getPythonInferenceSnippet)(model, "api_token");
110
- (0, vitest_1.expect)(snippets.length).toEqual(2);
111
- (0, vitest_1.expect)(snippets[0].client).toEqual("huggingface_hub");
112
- (0, vitest_1.expect)(snippets[0].content).toEqual(`from huggingface_hub import InferenceClient
113
- client = InferenceClient("black-forest-labs/FLUX.1-schnell", token="api_token")
114
-
115
- # output is a PIL.Image object
116
- image = client.text_to_image("Astronaut riding a horse")`);
117
- (0, vitest_1.expect)(snippets[1].client).toEqual("requests");
118
- (0, vitest_1.expect)(snippets[1].content).toEqual(`import requests
119
-
120
- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
121
- headers = {"Authorization": "Bearer api_token"}
122
-
123
- def query(payload):
124
- response = requests.post(API_URL, headers=headers, json=payload)
125
- return response.content
126
- image_bytes = query({
127
- "inputs": "Astronaut riding a horse",
128
- })
129
-
130
- # You can access the image with PIL.Image for example
131
- import io
132
- from PIL import Image
133
- image = Image.open(io.BytesIO(image_bytes))`);
134
- });
135
- });
@@ -1,2 +0,0 @@
1
- export {};
2
- //# sourceMappingURL=curl.spec.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"curl.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.spec.ts"],"names":[],"mappings":""}
@@ -1,87 +0,0 @@
1
- import { describe, expect, it } from "vitest";
2
- import { getCurlInferenceSnippet } from "./curl.js";
3
- describe("inference API snippets", () => {
4
- it("conversational llm", async () => {
5
- const model = {
6
- id: "meta-llama/Llama-3.1-8B-Instruct",
7
- pipeline_tag: "text-generation",
8
- tags: ["conversational"],
9
- inference: "",
10
- };
11
- const snippet = getCurlInferenceSnippet(model, "api_token");
12
- expect(snippet.content)
13
- .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
14
- -H "Authorization: Bearer api_token" \\
15
- -H 'Content-Type: application/json' \\
16
- --data '{
17
- "model": "meta-llama/Llama-3.1-8B-Instruct",
18
- "messages": [
19
- {
20
- "role": "user",
21
- "content": "What is the capital of France?"
22
- }
23
- ],
24
- "max_tokens": 500,
25
- "stream": true
26
- }'`);
27
- });
28
- it("conversational llm non-streaming", async () => {
29
- const model = {
30
- id: "meta-llama/Llama-3.1-8B-Instruct",
31
- pipeline_tag: "text-generation",
32
- tags: ["conversational"],
33
- inference: "",
34
- };
35
- const snippet = getCurlInferenceSnippet(model, "api_token", { streaming: false });
36
- expect(snippet.content)
37
- .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions' \\
38
- -H "Authorization: Bearer api_token" \\
39
- -H 'Content-Type: application/json' \\
40
- --data '{
41
- "model": "meta-llama/Llama-3.1-8B-Instruct",
42
- "messages": [
43
- {
44
- "role": "user",
45
- "content": "What is the capital of France?"
46
- }
47
- ],
48
- "max_tokens": 500,
49
- "stream": false
50
- }'`);
51
- });
52
- it("conversational vlm", async () => {
53
- const model = {
54
- id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
55
- pipeline_tag: "image-text-to-text",
56
- tags: ["conversational"],
57
- inference: "",
58
- };
59
- const snippet = getCurlInferenceSnippet(model, "api_token");
60
- expect(snippet.content)
61
- .toEqual(`curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \\
62
- -H "Authorization: Bearer api_token" \\
63
- -H 'Content-Type: application/json' \\
64
- --data '{
65
- "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
66
- "messages": [
67
- {
68
- "role": "user",
69
- "content": [
70
- {
71
- "type": "text",
72
- "text": "Describe this image in one sentence."
73
- },
74
- {
75
- "type": "image_url",
76
- "image_url": {
77
- "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
78
- }
79
- }
80
- ]
81
- }
82
- ],
83
- "max_tokens": 500,
84
- "stream": true
85
- }'`);
86
- });
87
- });
@@ -1,2 +0,0 @@
1
- export {};
2
- //# sourceMappingURL=js.spec.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"js.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.spec.ts"],"names":[],"mappings":""}
@@ -1,139 +0,0 @@
1
- import { describe, expect, it } from "vitest";
2
- import { getJsInferenceSnippet } from "./js.js";
3
- describe("inference API snippets", () => {
4
- it("conversational llm", async () => {
5
- const model = {
6
- id: "meta-llama/Llama-3.1-8B-Instruct",
7
- pipeline_tag: "text-generation",
8
- tags: ["conversational"],
9
- inference: "",
10
- };
11
- const snippet = getJsInferenceSnippet(model, "api_token");
12
- expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
13
-
14
- const client = new HfInference("api_token");
15
-
16
- let out = "";
17
-
18
- const stream = client.chatCompletionStream({
19
- model: "meta-llama/Llama-3.1-8B-Instruct",
20
- messages: [
21
- {
22
- role: "user",
23
- content: "What is the capital of France?"
24
- }
25
- ],
26
- max_tokens: 500
27
- });
28
-
29
- for await (const chunk of stream) {
30
- if (chunk.choices && chunk.choices.length > 0) {
31
- const newContent = chunk.choices[0].delta.content;
32
- out += newContent;
33
- console.log(newContent);
34
- }
35
- }`);
36
- });
37
- it("conversational llm non-streaming", async () => {
38
- const model = {
39
- id: "meta-llama/Llama-3.1-8B-Instruct",
40
- pipeline_tag: "text-generation",
41
- tags: ["conversational"],
42
- inference: "",
43
- };
44
- const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false });
45
- expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
46
-
47
- const client = new HfInference("api_token");
48
-
49
- const chatCompletion = await client.chatCompletion({
50
- model: "meta-llama/Llama-3.1-8B-Instruct",
51
- messages: [
52
- {
53
- role: "user",
54
- content: "What is the capital of France?"
55
- }
56
- ],
57
- max_tokens: 500
58
- });
59
-
60
- console.log(chatCompletion.choices[0].message);`);
61
- });
62
- it("conversational vlm", async () => {
63
- const model = {
64
- id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
65
- pipeline_tag: "image-text-to-text",
66
- tags: ["conversational"],
67
- inference: "",
68
- };
69
- const snippet = getJsInferenceSnippet(model, "api_token");
70
- expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
71
-
72
- const client = new HfInference("api_token");
73
-
74
- let out = "";
75
-
76
- const stream = client.chatCompletionStream({
77
- model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
78
- messages: [
79
- {
80
- role: "user",
81
- content: [
82
- {
83
- type: "text",
84
- text: "Describe this image in one sentence."
85
- },
86
- {
87
- type: "image_url",
88
- image_url: {
89
- url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
90
- }
91
- }
92
- ]
93
- }
94
- ],
95
- max_tokens: 500
96
- });
97
-
98
- for await (const chunk of stream) {
99
- if (chunk.choices && chunk.choices.length > 0) {
100
- const newContent = chunk.choices[0].delta.content;
101
- out += newContent;
102
- console.log(newContent);
103
- }
104
- }`);
105
- });
106
- it("conversational llm", async () => {
107
- const model = {
108
- id: "meta-llama/Llama-3.1-8B-Instruct",
109
- pipeline_tag: "text-generation",
110
- tags: ["conversational"],
111
- inference: "",
112
- };
113
- const snippet = getJsInferenceSnippet(model, "api_token");
114
- expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
115
-
116
- const client = new HfInference("api_token");
117
-
118
- let out = "";
119
-
120
- const stream = client.chatCompletionStream({
121
- model: "meta-llama/Llama-3.1-8B-Instruct",
122
- messages: [
123
- {
124
- role: "user",
125
- content: "What is the capital of France?"
126
- }
127
- ],
128
- max_tokens: 500
129
- });
130
-
131
- for await (const chunk of stream) {
132
- if (chunk.choices && chunk.choices.length > 0) {
133
- const newContent = chunk.choices[0].delta.content;
134
- out += newContent;
135
- console.log(newContent);
136
- }
137
- }`);
138
- });
139
- });
@@ -1,2 +0,0 @@
1
- export {};
2
- //# sourceMappingURL=python.spec.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"python.spec.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.spec.ts"],"names":[],"mappings":""}
@@ -1,133 +0,0 @@
1
- import { describe, expect, it } from "vitest";
2
- import { getPythonInferenceSnippet } from "./python.js";
3
- describe("inference API snippets", () => {
4
- it("conversational llm", async () => {
5
- const model = {
6
- id: "meta-llama/Llama-3.1-8B-Instruct",
7
- pipeline_tag: "text-generation",
8
- tags: ["conversational"],
9
- inference: "",
10
- };
11
- const snippet = getPythonInferenceSnippet(model, "api_token");
12
- expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
13
-
14
- client = InferenceClient(api_key="api_token")
15
-
16
- messages = [
17
- {
18
- "role": "user",
19
- "content": "What is the capital of France?"
20
- }
21
- ]
22
-
23
- stream = client.chat.completions.create(
24
- model="meta-llama/Llama-3.1-8B-Instruct",
25
- messages=messages,
26
- max_tokens=500,
27
- stream=True
28
- )
29
-
30
- for chunk in stream:
31
- print(chunk.choices[0].delta.content, end="")`);
32
- });
33
- it("conversational llm non-streaming", async () => {
34
- const model = {
35
- id: "meta-llama/Llama-3.1-8B-Instruct",
36
- pipeline_tag: "text-generation",
37
- tags: ["conversational"],
38
- inference: "",
39
- };
40
- const snippet = getPythonInferenceSnippet(model, "api_token", { streaming: false });
41
- expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
42
-
43
- client = InferenceClient(api_key="api_token")
44
-
45
- messages = [
46
- {
47
- "role": "user",
48
- "content": "What is the capital of France?"
49
- }
50
- ]
51
-
52
- completion = client.chat.completions.create(
53
- model="meta-llama/Llama-3.1-8B-Instruct",
54
- messages=messages,
55
- max_tokens=500
56
- )
57
-
58
- print(completion.choices[0].message)`);
59
- });
60
- it("conversational vlm", async () => {
61
- const model = {
62
- id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
63
- pipeline_tag: "image-text-to-text",
64
- tags: ["conversational"],
65
- inference: "",
66
- };
67
- const snippet = getPythonInferenceSnippet(model, "api_token");
68
- expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient
69
-
70
- client = InferenceClient(api_key="api_token")
71
-
72
- messages = [
73
- {
74
- "role": "user",
75
- "content": [
76
- {
77
- "type": "text",
78
- "text": "Describe this image in one sentence."
79
- },
80
- {
81
- "type": "image_url",
82
- "image_url": {
83
- "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
84
- }
85
- }
86
- ]
87
- }
88
- ]
89
-
90
- stream = client.chat.completions.create(
91
- model="meta-llama/Llama-3.2-11B-Vision-Instruct",
92
- messages=messages,
93
- max_tokens=500,
94
- stream=True
95
- )
96
-
97
- for chunk in stream:
98
- print(chunk.choices[0].delta.content, end="")`);
99
- });
100
- it("text-to-image", async () => {
101
- const model = {
102
- id: "black-forest-labs/FLUX.1-schnell",
103
- pipeline_tag: "text-to-image",
104
- tags: [],
105
- inference: "",
106
- };
107
- const snippets = getPythonInferenceSnippet(model, "api_token");
108
- expect(snippets.length).toEqual(2);
109
- expect(snippets[0].client).toEqual("huggingface_hub");
110
- expect(snippets[0].content).toEqual(`from huggingface_hub import InferenceClient
111
- client = InferenceClient("black-forest-labs/FLUX.1-schnell", token="api_token")
112
-
113
- # output is a PIL.Image object
114
- image = client.text_to_image("Astronaut riding a horse")`);
115
- expect(snippets[1].client).toEqual("requests");
116
- expect(snippets[1].content).toEqual(`import requests
117
-
118
- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
119
- headers = {"Authorization": "Bearer api_token"}
120
-
121
- def query(payload):
122
- response = requests.post(API_URL, headers=headers, json=payload)
123
- return response.content
124
- image_bytes = query({
125
- "inputs": "Astronaut riding a horse",
126
- })
127
-
128
- # You can access the image with PIL.Image for example
129
- import io
130
- from PIL import Image
131
- image = Image.open(io.BytesIO(image_bytes))`);
132
- });
133
- });