@huggingface/tasks 0.16.7 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/commonjs/index.d.ts +1 -3
  2. package/dist/commonjs/index.d.ts.map +1 -1
  3. package/dist/commonjs/index.js +5 -15
  4. package/dist/commonjs/snippets/index.d.ts +2 -5
  5. package/dist/commonjs/snippets/index.d.ts.map +1 -1
  6. package/dist/commonjs/snippets/index.js +2 -21
  7. package/dist/esm/index.d.ts +1 -3
  8. package/dist/esm/index.d.ts.map +1 -1
  9. package/dist/esm/index.js +1 -2
  10. package/dist/esm/snippets/index.d.ts +2 -5
  11. package/dist/esm/snippets/index.d.ts.map +1 -1
  12. package/dist/esm/snippets/index.js +2 -5
  13. package/package.json +1 -1
  14. package/src/index.ts +7 -3
  15. package/src/snippets/index.ts +2 -6
  16. package/dist/commonjs/snippets/curl.d.ts +0 -17
  17. package/dist/commonjs/snippets/curl.d.ts.map +0 -1
  18. package/dist/commonjs/snippets/curl.js +0 -129
  19. package/dist/commonjs/snippets/js.d.ts +0 -21
  20. package/dist/commonjs/snippets/js.d.ts.map +0 -1
  21. package/dist/commonjs/snippets/js.js +0 -413
  22. package/dist/commonjs/snippets/python.d.ts +0 -23
  23. package/dist/commonjs/snippets/python.d.ts.map +0 -1
  24. package/dist/commonjs/snippets/python.js +0 -435
  25. package/dist/esm/snippets/curl.d.ts +0 -17
  26. package/dist/esm/snippets/curl.d.ts.map +0 -1
  27. package/dist/esm/snippets/curl.js +0 -121
  28. package/dist/esm/snippets/js.d.ts +0 -21
  29. package/dist/esm/snippets/js.d.ts.map +0 -1
  30. package/dist/esm/snippets/js.js +0 -401
  31. package/dist/esm/snippets/python.d.ts +0 -23
  32. package/dist/esm/snippets/python.d.ts.map +0 -1
  33. package/dist/esm/snippets/python.js +0 -421
  34. package/src/snippets/curl.ts +0 -173
  35. package/src/snippets/js.ts +0 -471
  36. package/src/snippets/python.ts +0 -483
@@ -1,471 +0,0 @@
1
- import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
2
- import type { PipelineType, WidgetType } from "../pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4
- import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5
- import { getModelInputSnippet } from "./inputs.js";
6
- import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
7
-
8
- const HFJS_METHODS: Partial<Record<WidgetType, string>> = {
9
- "text-classification": "textClassification",
10
- "token-classification": "tokenClassification",
11
- "table-question-answering": "tableQuestionAnswering",
12
- "question-answering": "questionAnswering",
13
- translation: "translation",
14
- summarization: "summarization",
15
- "feature-extraction": "featureExtraction",
16
- "text-generation": "textGeneration",
17
- "text2text-generation": "textGeneration",
18
- "fill-mask": "fillMask",
19
- "sentence-similarity": "sentenceSimilarity",
20
- };
21
-
22
- export const snippetBasic = (
23
- model: ModelDataMinimal,
24
- accessToken: string,
25
- provider: SnippetInferenceProvider
26
- ): InferenceSnippet[] => {
27
- return [
28
- ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
29
- ? [
30
- {
31
- client: "huggingface.js",
32
- content: `\
33
- import { HfInference } from "@huggingface/inference";
34
-
35
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
36
-
37
- const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
38
- model: "${model.id}",
39
- inputs: ${getModelInputSnippet(model)},
40
- provider: "${provider}",
41
- });
42
-
43
- console.log(output);
44
- `,
45
- },
46
- ]
47
- : []),
48
- {
49
- client: "fetch",
50
- content: `\
51
- async function query(data) {
52
- const response = await fetch(
53
- "https://router.huggingface.co/hf-inference/models/${model.id}",
54
- {
55
- headers: {
56
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
57
- "Content-Type": "application/json",
58
- },
59
- method: "POST",
60
- body: JSON.stringify(data),
61
- }
62
- );
63
- const result = await response.json();
64
- return result;
65
- }
66
-
67
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
68
- console.log(JSON.stringify(response));
69
- });`,
70
- },
71
- ];
72
- };
73
-
74
- export const snippetTextGeneration = (
75
- model: ModelDataMinimal,
76
- accessToken: string,
77
- provider: SnippetInferenceProvider,
78
- providerModelId?: string,
79
- opts?: {
80
- streaming?: boolean;
81
- messages?: ChatCompletionInputMessage[];
82
- temperature?: GenerationParameters["temperature"];
83
- max_tokens?: GenerationParameters["max_tokens"];
84
- top_p?: GenerationParameters["top_p"];
85
- }
86
- ): InferenceSnippet[] => {
87
- if (model.tags.includes("conversational")) {
88
- // Conversational model detected, so we display a code snippet that features the Messages API
89
- const streaming = opts?.streaming ?? true;
90
- const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
91
- const messages = opts?.messages ?? exampleMessages;
92
- const messagesStr = stringifyMessages(messages, { indent: "\t" });
93
-
94
- const config = {
95
- ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
96
- max_tokens: opts?.max_tokens ?? 500,
97
- ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
98
- };
99
- const configStr = stringifyGenerationConfig(config, {
100
- indent: "\n\t",
101
- attributeValueConnector: ": ",
102
- });
103
-
104
- if (streaming) {
105
- return [
106
- {
107
- client: "huggingface.js",
108
- content: `import { HfInference } from "@huggingface/inference";
109
-
110
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
111
-
112
- let out = "";
113
-
114
- const stream = client.chatCompletionStream({
115
- model: "${model.id}",
116
- messages: ${messagesStr},
117
- provider: "${provider}",
118
- ${configStr}
119
- });
120
-
121
- for await (const chunk of stream) {
122
- if (chunk.choices && chunk.choices.length > 0) {
123
- const newContent = chunk.choices[0].delta.content;
124
- out += newContent;
125
- console.log(newContent);
126
- }
127
- }`,
128
- },
129
- {
130
- client: "openai",
131
- content: `import { OpenAI } from "openai";
132
-
133
- const client = new OpenAI({
134
- baseURL: "${openAIbaseUrl(provider)}",
135
- apiKey: "${accessToken || `{API_TOKEN}`}"
136
- });
137
-
138
- let out = "";
139
-
140
- const stream = await client.chat.completions.create({
141
- model: "${providerModelId ?? model.id}",
142
- messages: ${messagesStr},
143
- ${configStr}
144
- stream: true,
145
- });
146
-
147
- for await (const chunk of stream) {
148
- if (chunk.choices && chunk.choices.length > 0) {
149
- const newContent = chunk.choices[0].delta.content;
150
- out += newContent;
151
- console.log(newContent);
152
- }
153
- }`,
154
- },
155
- ];
156
- } else {
157
- return [
158
- {
159
- client: "huggingface.js",
160
- content: `import { HfInference } from "@huggingface/inference";
161
-
162
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
163
-
164
- const chatCompletion = await client.chatCompletion({
165
- model: "${model.id}",
166
- messages: ${messagesStr},
167
- provider: "${provider}",
168
- ${configStr}
169
- });
170
-
171
- console.log(chatCompletion.choices[0].message);
172
- `,
173
- },
174
- {
175
- client: "openai",
176
- content: `import { OpenAI } from "openai";
177
-
178
- const client = new OpenAI({
179
- baseURL: "${openAIbaseUrl(provider)}",
180
- apiKey: "${accessToken || `{API_TOKEN}`}"
181
- });
182
-
183
- const chatCompletion = await client.chat.completions.create({
184
- model: "${providerModelId ?? model.id}",
185
- messages: ${messagesStr},
186
- ${configStr}
187
- });
188
-
189
- console.log(chatCompletion.choices[0].message);
190
- `,
191
- },
192
- ];
193
- }
194
- } else {
195
- return snippetBasic(model, accessToken, provider);
196
- }
197
- };
198
-
199
- export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet[] => {
200
- return [
201
- {
202
- client: "fetch",
203
- content: `async function query(data) {
204
- const response = await fetch(
205
- "https://router.huggingface.co/hf-inference/models/${model.id}",
206
- {
207
- headers: {
208
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
209
- "Content-Type": "application/json",
210
- },
211
- method: "POST",
212
- body: JSON.stringify(data),
213
- }
214
- );
215
- const result = await response.json();
216
- return result;
217
- }
218
-
219
- query({"inputs": ${getModelInputSnippet(
220
- model
221
- )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
222
- console.log(JSON.stringify(response));
223
- });`,
224
- },
225
- ];
226
- };
227
-
228
- export const snippetTextToImage = (
229
- model: ModelDataMinimal,
230
- accessToken: string,
231
- provider: SnippetInferenceProvider
232
- ): InferenceSnippet[] => {
233
- return [
234
- {
235
- client: "huggingface.js",
236
- content: `\
237
- import { HfInference } from "@huggingface/inference";
238
-
239
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
240
-
241
- const image = await client.textToImage({
242
- model: "${model.id}",
243
- inputs: ${getModelInputSnippet(model)},
244
- parameters: { num_inference_steps: 5 },
245
- provider: "${provider}",
246
- });
247
- /// Use the generated image (it's a Blob)
248
- `,
249
- },
250
- ...(provider === "hf-inference"
251
- ? [
252
- {
253
- client: "fetch",
254
- content: `async function query(data) {
255
- const response = await fetch(
256
- "https://router.huggingface.co/hf-inference/models/${model.id}",
257
- {
258
- headers: {
259
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
260
- "Content-Type": "application/json",
261
- },
262
- method: "POST",
263
- body: JSON.stringify(data),
264
- }
265
- );
266
- const result = await response.blob();
267
- return result;
268
- }
269
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
270
- // Use image
271
- });`,
272
- },
273
- ]
274
- : []),
275
- ];
276
- };
277
-
278
- export const snippetTextToVideo = (
279
- model: ModelDataMinimal,
280
- accessToken: string,
281
- provider: SnippetInferenceProvider
282
- ): InferenceSnippet[] => {
283
- return ["fal-ai", "replicate"].includes(provider)
284
- ? [
285
- {
286
- client: "huggingface.js",
287
- content: `\
288
- import { HfInference } from "@huggingface/inference";
289
-
290
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
291
-
292
- const video = await client.textToVideo({
293
- model: "${model.id}",
294
- provider: "${provider}",
295
- inputs: ${getModelInputSnippet(model)},
296
- parameters: { num_inference_steps: 5 },
297
- });
298
- // Use the generated video (it's a Blob)
299
- `,
300
- },
301
- ]
302
- : [];
303
- };
304
-
305
- export const snippetTextToAudio = (
306
- model: ModelDataMinimal,
307
- accessToken: string,
308
- provider: SnippetInferenceProvider
309
- ): InferenceSnippet[] => {
310
- if (provider !== "hf-inference") {
311
- return [];
312
- }
313
- const commonSnippet = `async function query(data) {
314
- const response = await fetch(
315
- "https://router.huggingface.co/hf-inference/models/${model.id}",
316
- {
317
- headers: {
318
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
319
- "Content-Type": "application/json",
320
- },
321
- method: "POST",
322
- body: JSON.stringify(data),
323
- }
324
- );`;
325
- if (model.library_name === "transformers") {
326
- return [
327
- {
328
- client: "fetch",
329
- content:
330
- commonSnippet +
331
- `
332
- const result = await response.blob();
333
- return result;
334
- }
335
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
336
- // Returns a byte object of the Audio wavform. Use it directly!
337
- });`,
338
- },
339
- ];
340
- } else {
341
- return [
342
- {
343
- client: "fetch",
344
- content:
345
- commonSnippet +
346
- `
347
- const result = await response.json();
348
- return result;
349
- }
350
-
351
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
352
- console.log(JSON.stringify(response));
353
- });`,
354
- },
355
- ];
356
- }
357
- };
358
-
359
- export const snippetAutomaticSpeechRecognition = (
360
- model: ModelDataMinimal,
361
- accessToken: string,
362
- provider: SnippetInferenceProvider
363
- ): InferenceSnippet[] => {
364
- return [
365
- {
366
- client: "huggingface.js",
367
- content: `\
368
- import { HfInference } from "@huggingface/inference";
369
-
370
- const client = new HfInference("${accessToken || `{API_TOKEN}`}");
371
-
372
- const data = fs.readFileSync(${getModelInputSnippet(model)});
373
-
374
- const output = await client.automaticSpeechRecognition({
375
- data,
376
- model: "${model.id}",
377
- provider: "${provider}",
378
- });
379
-
380
- console.log(output);
381
- `,
382
- },
383
- ...(provider === "hf-inference" ? snippetFile(model, accessToken, provider) : []),
384
- ];
385
- };
386
-
387
- export const snippetFile = (
388
- model: ModelDataMinimal,
389
- accessToken: string,
390
- provider: SnippetInferenceProvider
391
- ): InferenceSnippet[] => {
392
- if (provider !== "hf-inference") {
393
- return [];
394
- }
395
- return [
396
- {
397
- client: "fetch",
398
- content: `async function query(filename) {
399
- const data = fs.readFileSync(filename);
400
- const response = await fetch(
401
- "https://router.huggingface.co/hf-inference/models/${model.id}",
402
- {
403
- headers: {
404
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
405
- "Content-Type": "application/json",
406
- },
407
- method: "POST",
408
- body: data,
409
- }
410
- );
411
- const result = await response.json();
412
- return result;
413
- }
414
-
415
- query(${getModelInputSnippet(model)}).then((response) => {
416
- console.log(JSON.stringify(response));
417
- });`,
418
- },
419
- ];
420
- };
421
-
422
- export const jsSnippets: Partial<
423
- Record<
424
- PipelineType,
425
- (
426
- model: ModelDataMinimal,
427
- accessToken: string,
428
- provider: SnippetInferenceProvider,
429
- providerModelId?: string,
430
- opts?: Record<string, unknown>
431
- ) => InferenceSnippet[]
432
- >
433
- > = {
434
- // Same order as in tasks/src/pipelines.ts
435
- "text-classification": snippetBasic,
436
- "token-classification": snippetBasic,
437
- "table-question-answering": snippetBasic,
438
- "question-answering": snippetBasic,
439
- "zero-shot-classification": snippetZeroShotClassification,
440
- translation: snippetBasic,
441
- summarization: snippetBasic,
442
- "feature-extraction": snippetBasic,
443
- "text-generation": snippetTextGeneration,
444
- "image-text-to-text": snippetTextGeneration,
445
- "text2text-generation": snippetBasic,
446
- "fill-mask": snippetBasic,
447
- "sentence-similarity": snippetBasic,
448
- "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
449
- "text-to-image": snippetTextToImage,
450
- "text-to-video": snippetTextToVideo,
451
- "text-to-speech": snippetTextToAudio,
452
- "text-to-audio": snippetTextToAudio,
453
- "audio-to-audio": snippetFile,
454
- "audio-classification": snippetFile,
455
- "image-classification": snippetFile,
456
- "image-to-text": snippetFile,
457
- "object-detection": snippetFile,
458
- "image-segmentation": snippetFile,
459
- };
460
-
461
- export function getJsInferenceSnippet(
462
- model: ModelDataMinimal,
463
- accessToken: string,
464
- provider: SnippetInferenceProvider,
465
- providerModelId?: string,
466
- opts?: Record<string, unknown>
467
- ): InferenceSnippet[] {
468
- return model.pipeline_tag && model.pipeline_tag in jsSnippets
469
- ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
470
- : [];
471
- }