@huggingface/tasks 0.13.1-test → 0.13.1-test2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (198)
  1. package/package.json +4 -2
  2. package/src/dataset-libraries.ts +89 -0
  3. package/src/default-widget-inputs.ts +718 -0
  4. package/src/gguf.ts +40 -0
  5. package/src/hardware.ts +482 -0
  6. package/src/index.ts +59 -0
  7. package/src/library-to-tasks.ts +76 -0
  8. package/src/local-apps.ts +412 -0
  9. package/src/model-data.ts +149 -0
  10. package/src/model-libraries-downloads.ts +18 -0
  11. package/src/model-libraries-snippets.ts +1128 -0
  12. package/src/model-libraries.ts +820 -0
  13. package/src/pipelines.ts +698 -0
  14. package/src/snippets/common.ts +39 -0
  15. package/src/snippets/curl.spec.ts +94 -0
  16. package/src/snippets/curl.ts +120 -0
  17. package/src/snippets/index.ts +7 -0
  18. package/src/snippets/inputs.ts +167 -0
  19. package/src/snippets/js.spec.ts +148 -0
  20. package/src/snippets/js.ts +305 -0
  21. package/src/snippets/python.spec.ts +144 -0
  22. package/src/snippets/python.ts +321 -0
  23. package/src/snippets/types.ts +16 -0
  24. package/src/tasks/audio-classification/about.md +86 -0
  25. package/src/tasks/audio-classification/data.ts +81 -0
  26. package/src/tasks/audio-classification/inference.ts +52 -0
  27. package/src/tasks/audio-classification/spec/input.json +35 -0
  28. package/src/tasks/audio-classification/spec/output.json +11 -0
  29. package/src/tasks/audio-to-audio/about.md +56 -0
  30. package/src/tasks/audio-to-audio/data.ts +70 -0
  31. package/src/tasks/automatic-speech-recognition/about.md +90 -0
  32. package/src/tasks/automatic-speech-recognition/data.ts +82 -0
  33. package/src/tasks/automatic-speech-recognition/inference.ts +160 -0
  34. package/src/tasks/automatic-speech-recognition/spec/input.json +35 -0
  35. package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
  36. package/src/tasks/chat-completion/inference.ts +322 -0
  37. package/src/tasks/chat-completion/spec/input.json +350 -0
  38. package/src/tasks/chat-completion/spec/output.json +206 -0
  39. package/src/tasks/chat-completion/spec/stream_output.json +213 -0
  40. package/src/tasks/common-definitions.json +100 -0
  41. package/src/tasks/depth-estimation/about.md +45 -0
  42. package/src/tasks/depth-estimation/data.ts +70 -0
  43. package/src/tasks/depth-estimation/inference.ts +35 -0
  44. package/src/tasks/depth-estimation/spec/input.json +25 -0
  45. package/src/tasks/depth-estimation/spec/output.json +16 -0
  46. package/src/tasks/document-question-answering/about.md +53 -0
  47. package/src/tasks/document-question-answering/data.ts +85 -0
  48. package/src/tasks/document-question-answering/inference.ts +110 -0
  49. package/src/tasks/document-question-answering/spec/input.json +85 -0
  50. package/src/tasks/document-question-answering/spec/output.json +36 -0
  51. package/src/tasks/feature-extraction/about.md +72 -0
  52. package/src/tasks/feature-extraction/data.ts +57 -0
  53. package/src/tasks/feature-extraction/inference.ts +40 -0
  54. package/src/tasks/feature-extraction/spec/input.json +47 -0
  55. package/src/tasks/feature-extraction/spec/output.json +15 -0
  56. package/src/tasks/fill-mask/about.md +51 -0
  57. package/src/tasks/fill-mask/data.ts +79 -0
  58. package/src/tasks/fill-mask/inference.ts +62 -0
  59. package/src/tasks/fill-mask/spec/input.json +38 -0
  60. package/src/tasks/fill-mask/spec/output.json +29 -0
  61. package/src/tasks/image-classification/about.md +50 -0
  62. package/src/tasks/image-classification/data.ts +88 -0
  63. package/src/tasks/image-classification/inference.ts +52 -0
  64. package/src/tasks/image-classification/spec/input.json +35 -0
  65. package/src/tasks/image-classification/spec/output.json +11 -0
  66. package/src/tasks/image-feature-extraction/about.md +23 -0
  67. package/src/tasks/image-feature-extraction/data.ts +59 -0
  68. package/src/tasks/image-segmentation/about.md +63 -0
  69. package/src/tasks/image-segmentation/data.ts +99 -0
  70. package/src/tasks/image-segmentation/inference.ts +69 -0
  71. package/src/tasks/image-segmentation/spec/input.json +45 -0
  72. package/src/tasks/image-segmentation/spec/output.json +26 -0
  73. package/src/tasks/image-text-to-text/about.md +76 -0
  74. package/src/tasks/image-text-to-text/data.ts +102 -0
  75. package/src/tasks/image-to-3d/about.md +62 -0
  76. package/src/tasks/image-to-3d/data.ts +75 -0
  77. package/src/tasks/image-to-image/about.md +129 -0
  78. package/src/tasks/image-to-image/data.ts +101 -0
  79. package/src/tasks/image-to-image/inference.ts +68 -0
  80. package/src/tasks/image-to-image/spec/input.json +55 -0
  81. package/src/tasks/image-to-image/spec/output.json +12 -0
  82. package/src/tasks/image-to-text/about.md +61 -0
  83. package/src/tasks/image-to-text/data.ts +82 -0
  84. package/src/tasks/image-to-text/inference.ts +143 -0
  85. package/src/tasks/image-to-text/spec/input.json +34 -0
  86. package/src/tasks/image-to-text/spec/output.json +14 -0
  87. package/src/tasks/index.ts +312 -0
  88. package/src/tasks/keypoint-detection/about.md +57 -0
  89. package/src/tasks/keypoint-detection/data.ts +50 -0
  90. package/src/tasks/mask-generation/about.md +65 -0
  91. package/src/tasks/mask-generation/data.ts +55 -0
  92. package/src/tasks/object-detection/about.md +37 -0
  93. package/src/tasks/object-detection/data.ts +86 -0
  94. package/src/tasks/object-detection/inference.ts +75 -0
  95. package/src/tasks/object-detection/spec/input.json +31 -0
  96. package/src/tasks/object-detection/spec/output.json +50 -0
  97. package/src/tasks/placeholder/about.md +15 -0
  98. package/src/tasks/placeholder/data.ts +21 -0
  99. package/src/tasks/placeholder/spec/input.json +35 -0
  100. package/src/tasks/placeholder/spec/output.json +17 -0
  101. package/src/tasks/question-answering/about.md +56 -0
  102. package/src/tasks/question-answering/data.ts +75 -0
  103. package/src/tasks/question-answering/inference.ts +99 -0
  104. package/src/tasks/question-answering/spec/input.json +67 -0
  105. package/src/tasks/question-answering/spec/output.json +29 -0
  106. package/src/tasks/reinforcement-learning/about.md +167 -0
  107. package/src/tasks/reinforcement-learning/data.ts +75 -0
  108. package/src/tasks/sentence-similarity/about.md +97 -0
  109. package/src/tasks/sentence-similarity/data.ts +101 -0
  110. package/src/tasks/sentence-similarity/inference.ts +32 -0
  111. package/src/tasks/sentence-similarity/spec/input.json +40 -0
  112. package/src/tasks/sentence-similarity/spec/output.json +12 -0
  113. package/src/tasks/summarization/about.md +58 -0
  114. package/src/tasks/summarization/data.ts +76 -0
  115. package/src/tasks/summarization/inference.ts +57 -0
  116. package/src/tasks/summarization/spec/input.json +42 -0
  117. package/src/tasks/summarization/spec/output.json +14 -0
  118. package/src/tasks/table-question-answering/about.md +43 -0
  119. package/src/tasks/table-question-answering/data.ts +59 -0
  120. package/src/tasks/table-question-answering/inference.ts +61 -0
  121. package/src/tasks/table-question-answering/spec/input.json +44 -0
  122. package/src/tasks/table-question-answering/spec/output.json +40 -0
  123. package/src/tasks/tabular-classification/about.md +65 -0
  124. package/src/tasks/tabular-classification/data.ts +68 -0
  125. package/src/tasks/tabular-regression/about.md +87 -0
  126. package/src/tasks/tabular-regression/data.ts +57 -0
  127. package/src/tasks/text-classification/about.md +173 -0
  128. package/src/tasks/text-classification/data.ts +103 -0
  129. package/src/tasks/text-classification/inference.ts +51 -0
  130. package/src/tasks/text-classification/spec/input.json +35 -0
  131. package/src/tasks/text-classification/spec/output.json +11 -0
  132. package/src/tasks/text-generation/about.md +154 -0
  133. package/src/tasks/text-generation/data.ts +114 -0
  134. package/src/tasks/text-generation/inference.ts +200 -0
  135. package/src/tasks/text-generation/spec/input.json +219 -0
  136. package/src/tasks/text-generation/spec/output.json +179 -0
  137. package/src/tasks/text-generation/spec/stream_output.json +103 -0
  138. package/src/tasks/text-to-3d/about.md +62 -0
  139. package/src/tasks/text-to-3d/data.ts +56 -0
  140. package/src/tasks/text-to-audio/inference.ts +143 -0
  141. package/src/tasks/text-to-audio/spec/input.json +31 -0
  142. package/src/tasks/text-to-audio/spec/output.json +17 -0
  143. package/src/tasks/text-to-image/about.md +96 -0
  144. package/src/tasks/text-to-image/data.ts +100 -0
  145. package/src/tasks/text-to-image/inference.ts +75 -0
  146. package/src/tasks/text-to-image/spec/input.json +63 -0
  147. package/src/tasks/text-to-image/spec/output.json +13 -0
  148. package/src/tasks/text-to-speech/about.md +63 -0
  149. package/src/tasks/text-to-speech/data.ts +79 -0
  150. package/src/tasks/text-to-speech/inference.ts +145 -0
  151. package/src/tasks/text-to-speech/spec/input.json +31 -0
  152. package/src/tasks/text-to-speech/spec/output.json +7 -0
  153. package/src/tasks/text-to-video/about.md +41 -0
  154. package/src/tasks/text-to-video/data.ts +102 -0
  155. package/src/tasks/text2text-generation/inference.ts +55 -0
  156. package/src/tasks/text2text-generation/spec/input.json +55 -0
  157. package/src/tasks/text2text-generation/spec/output.json +14 -0
  158. package/src/tasks/token-classification/about.md +76 -0
  159. package/src/tasks/token-classification/data.ts +92 -0
  160. package/src/tasks/token-classification/inference.ts +85 -0
  161. package/src/tasks/token-classification/spec/input.json +65 -0
  162. package/src/tasks/token-classification/spec/output.json +37 -0
  163. package/src/tasks/translation/about.md +65 -0
  164. package/src/tasks/translation/data.ts +70 -0
  165. package/src/tasks/translation/inference.ts +67 -0
  166. package/src/tasks/translation/spec/input.json +50 -0
  167. package/src/tasks/translation/spec/output.json +14 -0
  168. package/src/tasks/unconditional-image-generation/about.md +50 -0
  169. package/src/tasks/unconditional-image-generation/data.ts +72 -0
  170. package/src/tasks/video-classification/about.md +37 -0
  171. package/src/tasks/video-classification/data.ts +84 -0
  172. package/src/tasks/video-classification/inference.ts +59 -0
  173. package/src/tasks/video-classification/spec/input.json +42 -0
  174. package/src/tasks/video-classification/spec/output.json +10 -0
  175. package/src/tasks/video-text-to-text/about.md +98 -0
  176. package/src/tasks/video-text-to-text/data.ts +66 -0
  177. package/src/tasks/visual-question-answering/about.md +48 -0
  178. package/src/tasks/visual-question-answering/data.ts +97 -0
  179. package/src/tasks/visual-question-answering/inference.ts +62 -0
  180. package/src/tasks/visual-question-answering/spec/input.json +41 -0
  181. package/src/tasks/visual-question-answering/spec/output.json +21 -0
  182. package/src/tasks/zero-shot-classification/about.md +40 -0
  183. package/src/tasks/zero-shot-classification/data.ts +70 -0
  184. package/src/tasks/zero-shot-classification/inference.ts +67 -0
  185. package/src/tasks/zero-shot-classification/spec/input.json +50 -0
  186. package/src/tasks/zero-shot-classification/spec/output.json +11 -0
  187. package/src/tasks/zero-shot-image-classification/about.md +75 -0
  188. package/src/tasks/zero-shot-image-classification/data.ts +84 -0
  189. package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
  190. package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
  191. package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
  192. package/src/tasks/zero-shot-object-detection/about.md +45 -0
  193. package/src/tasks/zero-shot-object-detection/data.ts +67 -0
  194. package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
  195. package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
  196. package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
  197. package/src/tokenizer-data.ts +32 -0
  198. package/src/widget-example.ts +125 -0
@@ -0,0 +1,305 @@
1
+ import type { PipelineType } from "../pipelines.js";
2
+ import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
3
+ import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
4
+ import { getModelInputSnippet } from "./inputs.js";
5
+ import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
6
+
7
+ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
8
+ content: `async function query(data) {
9
+ const response = await fetch(
10
+ "https://api-inference.huggingface.co/models/${model.id}",
11
+ {
12
+ headers: {
13
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
14
+ "Content-Type": "application/json",
15
+ },
16
+ method: "POST",
17
+ body: JSON.stringify(data),
18
+ }
19
+ );
20
+ const result = await response.json();
21
+ return result;
22
+ }
23
+
24
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
25
+ console.log(JSON.stringify(response));
26
+ });`,
27
+ });
28
+
29
/**
 * Build the JS inference snippet(s) for text-generation-like models.
 *
 * For models tagged "conversational", returns TWO snippets featuring the
 * Messages (chat-completion) API — one using `@huggingface/inference`, one
 * using the OpenAI JS SDK pointed at the HF-compatible base URL. Streaming is
 * the default (`opts.streaming ?? true`). Non-conversational models fall back
 * to the generic `snippetBasic`.
 *
 * @param model - Minimal model info; `model.tags` decides the conversational path.
 * @param accessToken - User token; placeholder `{API_TOKEN}` when empty.
 * @param opts - Optional overrides: streaming flag, messages, and generation params.
 */
export const snippetTextGeneration = (
	model: ModelDataMinimal,
	accessToken: string,
	opts?: {
		streaming?: boolean;
		messages?: ChatCompletionInputMessage[];
		temperature?: GenerationParameters["temperature"];
		max_tokens?: GenerationParameters["max_tokens"];
		top_p?: GenerationParameters["top_p"];
	}
): InferenceSnippet | InferenceSnippet[] => {
	if (model.tags.includes("conversational")) {
		// Conversational model detected, so we display a code snippet that features the Messages API
		const streaming = opts?.streaming ?? true;
		// The task's example input doubles as the default chat messages.
		const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
		const messages = opts?.messages ?? exampleMessages;
		const messagesStr = stringifyMessages(messages, { indent: "\t" });

		// Only include temperature/top_p when explicitly provided; max_tokens
		// always gets a default of 500 so the emitted snippet is bounded.
		const config = {
			...(opts?.temperature ? { temperature: opts.temperature } : undefined),
			max_tokens: opts?.max_tokens ?? 500,
			...(opts?.top_p ? { top_p: opts.top_p } : undefined),
		};
		const configStr = stringifyGenerationConfig(config, {
			indent: "\n\t",
			attributeValueConnector: ": ",
		});

		if (streaming) {
			// Streaming variant: iterate over chunks and print deltas as they arrive.
			return [
				{
					client: "huggingface.js",
					content: `import { HfInference } from "@huggingface/inference"

const client = new HfInference("${accessToken || `{API_TOKEN}`}")

let out = "";

const stream = client.chatCompletionStream({
	model: "${model.id}",
	messages: ${messagesStr},
	${configStr}
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}`,
				},
				{
					client: "openai",
					content: `import { OpenAI } from "openai"

const client = new OpenAI({
	baseURL: "https://api-inference.huggingface.co/v1/",
	apiKey: "${accessToken || `{API_TOKEN}`}"
})

let out = "";

const stream = await client.chat.completions.create({
	model: "${model.id}",
	messages: ${messagesStr},
	${configStr},
	stream: true,
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}`,
				},
			];
		} else {
			// Non-streaming variant: single awaited completion, log the first choice.
			return [
				{
					client: "huggingface.js",
					content: `import { HfInference } from "@huggingface/inference"

const client = new HfInference("${accessToken || `{API_TOKEN}`}")

const chatCompletion = await client.chatCompletion({
	model: "${model.id}",
	messages: ${messagesStr},
	${configStr}
});

console.log(chatCompletion.choices[0].message);`,
				},
				{
					client: "openai",
					content: `import { OpenAI } from "openai"

const client = new OpenAI({
	baseURL: "https://api-inference.huggingface.co/v1/",
	apiKey: "${accessToken || `{API_TOKEN}`}"
})

const chatCompletion = await client.chat.completions.create({
	model: "${model.id}",
	messages: ${messagesStr},
	${configStr}
});

console.log(chatCompletion.choices[0].message);`,
				},
			];
		}
	} else {
		// Not conversational: plain text-generation payload via the generic snippet.
		return snippetBasic(model, accessToken);
	}
};
147
+
148
+ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
149
+ content: `async function query(data) {
150
+ const response = await fetch(
151
+ "https://api-inference.huggingface.co/models/${model.id}",
152
+ {
153
+ headers: {
154
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
155
+ "Content-Type": "application/json",
156
+ },
157
+ method: "POST",
158
+ body: JSON.stringify(data),
159
+ }
160
+ );
161
+ const result = await response.json();
162
+ return result;
163
+ }
164
+
165
+ query({"inputs": ${getModelInputSnippet(
166
+ model
167
+ )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
168
+ console.log(JSON.stringify(response));
169
+ });`,
170
+ });
171
+
172
+ export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
173
+ content: `async function query(data) {
174
+ const response = await fetch(
175
+ "https://api-inference.huggingface.co/models/${model.id}",
176
+ {
177
+ headers: {
178
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
179
+ "Content-Type": "application/json",
180
+ },
181
+ method: "POST",
182
+ body: JSON.stringify(data),
183
+ }
184
+ );
185
+ const result = await response.blob();
186
+ return result;
187
+ }
188
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
189
+ // Use image
190
+ });`,
191
+ });
192
+
193
+ export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => {
194
+ const commonSnippet = `async function query(data) {
195
+ const response = await fetch(
196
+ "https://api-inference.huggingface.co/models/${model.id}",
197
+ {
198
+ headers: {
199
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
200
+ "Content-Type": "application/json",
201
+ },
202
+ method: "POST",
203
+ body: JSON.stringify(data),
204
+ }
205
+ );`;
206
+ if (model.library_name === "transformers") {
207
+ return {
208
+ content:
209
+ commonSnippet +
210
+ `
211
+ const result = await response.blob();
212
+ return result;
213
+ }
214
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
215
+ // Returns a byte object of the Audio wavform. Use it directly!
216
+ });`,
217
+ };
218
+ } else {
219
+ return {
220
+ content:
221
+ commonSnippet +
222
+ `
223
+ const result = await response.json();
224
+ return result;
225
+ }
226
+
227
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
228
+ console.log(JSON.stringify(response));
229
+ });`,
230
+ };
231
+ }
232
+ };
233
+
234
+ export const snippetFile = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
235
+ content: `async function query(filename) {
236
+ const data = fs.readFileSync(filename);
237
+ const response = await fetch(
238
+ "https://api-inference.huggingface.co/models/${model.id}",
239
+ {
240
+ headers: {
241
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
242
+ "Content-Type": "application/json",
243
+ },
244
+ method: "POST",
245
+ body: data,
246
+ }
247
+ );
248
+ const result = await response.json();
249
+ return result;
250
+ }
251
+
252
+ query(${getModelInputSnippet(model)}).then((response) => {
253
+ console.log(JSON.stringify(response));
254
+ });`,
255
+ });
256
+
257
/**
 * Registry mapping each supported pipeline task to the function that builds
 * its JS inference snippet. Tasks absent from this map have no JS snippet
 * (see `hasJsInferenceSnippet`).
 */
export const jsSnippets: Partial<
	Record<
		PipelineType,
		(
			model: ModelDataMinimal,
			accessToken: string,
			opts?: Record<string, unknown>
		) => InferenceSnippet | InferenceSnippet[]
	>
> = {
	// Same order as in js/src/lib/interfaces/Types.ts
	"text-classification": snippetBasic,
	"token-classification": snippetBasic,
	"table-question-answering": snippetBasic,
	"question-answering": snippetBasic,
	"zero-shot-classification": snippetZeroShotClassification,
	translation: snippetBasic,
	summarization: snippetBasic,
	"feature-extraction": snippetBasic,
	"text-generation": snippetTextGeneration,
	"image-text-to-text": snippetTextGeneration,
	"text2text-generation": snippetBasic,
	"fill-mask": snippetBasic,
	"sentence-similarity": snippetBasic,
	"automatic-speech-recognition": snippetFile,
	"text-to-image": snippetTextToImage,
	"text-to-speech": snippetTextToAudio,
	"text-to-audio": snippetTextToAudio,
	"audio-to-audio": snippetFile,
	"audio-classification": snippetFile,
	"image-classification": snippetFile,
	"image-to-text": snippetFile,
	"object-detection": snippetFile,
	"image-segmentation": snippetFile,
};
292
+
293
+ export function getJsInferenceSnippet(
294
+ model: ModelDataMinimal,
295
+ accessToken: string,
296
+ opts?: Record<string, unknown>
297
+ ): InferenceSnippet | InferenceSnippet[] {
298
+ return model.pipeline_tag && model.pipeline_tag in jsSnippets
299
+ ? jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ?? { content: "" }
300
+ : { content: "" };
301
+ }
302
+
303
+ export function hasJsInferenceSnippet(model: ModelDataMinimal): boolean {
304
+ return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
305
+ }
@@ -0,0 +1,144 @@
1
+ import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
2
+ import { describe, expect, it } from "vitest";
3
+ import { getPythonInferenceSnippet } from "./python.js";
4
+
5
// Tests for getPythonInferenceSnippet: each case builds a minimal model and
// pins the exact Python code the snippet generator must emit.
// NOTE(review): internal indentation of the expected strings was reconstructed
// from a mangled paste — verify against the generator's actual output.
describe("inference API snippets", () => {
	// Conversational LLM: default path is the streaming Messages-API snippet.
	it("conversational llm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient

client = InferenceClient(api_key="api_token")

messages = [
	{
		"role": "user",
		"content": "What is the capital of France?"
	}
]

stream = client.chat.completions.create(
	model="meta-llama/Llama-3.1-8B-Instruct",
	messages=messages,
	max_tokens=500,
	stream=True
)

for chunk in stream:
	print(chunk.choices[0].delta.content, end="")`);
	});

	// streaming: false opts flag switches to the blocking completion snippet.
	it("conversational llm non-streaming", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.1-8B-Instruct",
			pipeline_tag: "text-generation",
			tags: ["conversational"],
			inference: "",
		};
		const snippet = getPythonInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient

client = InferenceClient(api_key="api_token")

messages = [
	{
		"role": "user",
		"content": "What is the capital of France?"
	}
]

completion = client.chat.completions.create(
	model="meta-llama/Llama-3.1-8B-Instruct",
	messages=messages,
	max_tokens=500
)

print(completion.choices[0].message)`);
	});

	// Vision-language model: example messages carry text + image_url content parts.
	it("conversational vlm", async () => {
		const model: ModelDataMinimal = {
			id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
			pipeline_tag: "image-text-to-text",
			tags: ["conversational"],
			inference: "",
		};
		const snippet = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippet[0].content).toEqual(`from huggingface_hub import InferenceClient

client = InferenceClient(api_key="api_token")

messages = [
	{
		"role": "user",
		"content": [
			{
				"type": "text",
				"text": "Describe this image in one sentence."
			},
			{
				"type": "image_url",
				"image_url": {
					"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
				}
			}
		]
	}
]

stream = client.chat.completions.create(
	model="meta-llama/Llama-3.2-11B-Vision-Instruct",
	messages=messages,
	max_tokens=500,
	stream=True
)

for chunk in stream:
	print(chunk.choices[0].delta.content, end="")`);
	});

	// Non-conversational task: expect BOTH the huggingface_hub client snippet
	// and the raw requests snippet, in that order.
	it("text-to-image", async () => {
		const model: ModelDataMinimal = {
			id: "black-forest-labs/FLUX.1-schnell",
			pipeline_tag: "text-to-image",
			tags: [],
			inference: "",
		};
		const snippets = getPythonInferenceSnippet(model, "api_token") as InferenceSnippet[];

		expect(snippets.length).toEqual(2);

		expect(snippets[0].client).toEqual("huggingface_hub");
		expect(snippets[0].content).toEqual(`from huggingface_hub import InferenceClient
client = InferenceClient("black-forest-labs/FLUX.1-schnell", token="api_token")

# output is a PIL.Image object
image = client.text_to_image("Astronaut riding a horse")`);

		expect(snippets[1].client).toEqual("requests");
		expect(snippets[1].content).toEqual(`import requests

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": "Bearer api_token"}

def query(payload):
	response = requests.post(API_URL, headers=headers, json=payload)
	return response.content
image_bytes = query({
	"inputs": "Astronaut riding a horse",
})

# You can access the image with PIL.Image for example
import io
from PIL import Image
image = Image.open(io.BytesIO(image_bytes))`);
	});
});