@huggingface/inference 3.5.2 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/dist/browser/index.cjs +1652 -0
  2. package/dist/browser/index.js +1652 -0
  3. package/dist/index.cjs +277 -971
  4. package/dist/index.js +268 -982
  5. package/dist/src/index.d.ts.map +1 -1
  6. package/dist/src/lib/makeRequestOptions.d.ts +16 -1
  7. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  8. package/dist/src/providers/novita.d.ts.map +1 -1
  9. package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
  10. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
  11. package/dist/src/snippets/index.d.ts +1 -4
  12. package/dist/src/snippets/index.d.ts.map +1 -1
  13. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  14. package/package.json +15 -6
  15. package/src/index.ts +1 -1
  16. package/src/lib/makeRequestOptions.ts +37 -10
  17. package/src/providers/fireworks-ai.ts +1 -1
  18. package/src/providers/hf-inference.ts +1 -1
  19. package/src/providers/nebius.ts +3 -3
  20. package/src/providers/novita.ts +7 -6
  21. package/src/providers/sambanova.ts +1 -1
  22. package/src/providers/together.ts +3 -3
  23. package/src/snippets/getInferenceSnippets.ts +398 -0
  24. package/src/snippets/index.ts +1 -5
  25. package/src/snippets/templates/js/fetch/basic.jinja +19 -0
  26. package/src/snippets/templates/js/fetch/basicAudio.jinja +19 -0
  27. package/src/snippets/templates/js/fetch/basicImage.jinja +19 -0
  28. package/src/snippets/templates/js/fetch/textToAudio.jinja +41 -0
  29. package/src/snippets/templates/js/fetch/textToImage.jinja +19 -0
  30. package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +22 -0
  31. package/src/snippets/templates/js/huggingface.js/basic.jinja +11 -0
  32. package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +13 -0
  33. package/src/snippets/templates/js/huggingface.js/basicImage.jinja +13 -0
  34. package/src/snippets/templates/js/huggingface.js/conversational.jinja +11 -0
  35. package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +19 -0
  36. package/src/snippets/templates/js/huggingface.js/textToImage.jinja +11 -0
  37. package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +10 -0
  38. package/src/snippets/templates/js/openai/conversational.jinja +13 -0
  39. package/src/snippets/templates/js/openai/conversationalStream.jinja +22 -0
  40. package/src/snippets/templates/python/fal_client/textToImage.jinja +11 -0
  41. package/src/snippets/templates/python/huggingface_hub/basic.jinja +4 -0
  42. package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +1 -0
  43. package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +1 -0
  44. package/src/snippets/templates/python/huggingface_hub/conversational.jinja +6 -0
  45. package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +8 -0
  46. package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +5 -0
  47. package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +6 -0
  48. package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +6 -0
  49. package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +5 -0
  50. package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +4 -0
  51. package/src/snippets/templates/python/openai/conversational.jinja +13 -0
  52. package/src/snippets/templates/python/openai/conversationalStream.jinja +15 -0
  53. package/src/snippets/templates/python/requests/basic.jinja +7 -0
  54. package/src/snippets/templates/python/requests/basicAudio.jinja +7 -0
  55. package/src/snippets/templates/python/requests/basicImage.jinja +7 -0
  56. package/src/snippets/templates/python/requests/conversational.jinja +9 -0
  57. package/src/snippets/templates/python/requests/conversationalStream.jinja +16 -0
  58. package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +13 -0
  59. package/src/snippets/templates/python/requests/imageToImage.jinja +15 -0
  60. package/src/snippets/templates/python/requests/importRequests.jinja +10 -0
  61. package/src/snippets/templates/python/requests/tabular.jinja +9 -0
  62. package/src/snippets/templates/python/requests/textToAudio.jinja +23 -0
  63. package/src/snippets/templates/python/requests/textToImage.jinja +14 -0
  64. package/src/snippets/templates/python/requests/zeroShotClassification.jinja +8 -0
  65. package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +14 -0
  66. package/src/snippets/templates/sh/curl/basic.jinja +7 -0
  67. package/src/snippets/templates/sh/curl/basicAudio.jinja +5 -0
  68. package/src/snippets/templates/sh/curl/basicImage.jinja +5 -0
  69. package/src/snippets/templates/sh/curl/conversational.jinja +7 -0
  70. package/src/snippets/templates/sh/curl/conversationalStream.jinja +7 -0
  71. package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +5 -0
  72. package/src/tasks/cv/textToVideo.ts +25 -5
  73. package/src/vendor/fetch-event-source/LICENSE +21 -0
  74. package/dist/src/snippets/curl.d.ts +0 -17
  75. package/dist/src/snippets/curl.d.ts.map +0 -1
  76. package/dist/src/snippets/js.d.ts +0 -21
  77. package/dist/src/snippets/js.d.ts.map +0 -1
  78. package/dist/src/snippets/python.d.ts +0 -4
  79. package/dist/src/snippets/python.d.ts.map +0 -1
  80. package/src/snippets/curl.ts +0 -177
  81. package/src/snippets/js.ts +0 -475
  82. package/src/snippets/python.ts +0 -563
@@ -1,177 +0,0 @@
1
- import { HF_HUB_INFERENCE_PROXY_TEMPLATE, type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import type { PipelineType } from "@huggingface/tasks/src/pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4
- import {
5
- type InferenceSnippet,
6
- type ModelDataMinimal,
7
- getModelInputSnippet,
8
- stringifyGenerationConfig,
9
- stringifyMessages,
10
- } from "@huggingface/tasks";
11
-
12
- export const snippetBasic = (
13
- model: ModelDataMinimal,
14
- accessToken: string,
15
- provider: SnippetInferenceProvider
16
- ): InferenceSnippet[] => {
17
- if (provider !== "hf-inference") {
18
- return [];
19
- }
20
- return [
21
- {
22
- client: "curl",
23
- content: `\
24
- curl https://router.huggingface.co/hf-inference/models/${model.id} \\
25
- -X POST \\
26
- -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
27
- -H 'Content-Type: application/json' \\
28
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
29
- },
30
- ];
31
- };
32
-
33
- export const snippetTextGeneration = (
34
- model: ModelDataMinimal,
35
- accessToken: string,
36
- provider: SnippetInferenceProvider,
37
- providerModelId?: string,
38
- opts?: {
39
- streaming?: boolean;
40
- messages?: ChatCompletionInputMessage[];
41
- temperature?: GenerationParameters["temperature"];
42
- max_tokens?: GenerationParameters["max_tokens"];
43
- top_p?: GenerationParameters["top_p"];
44
- }
45
- ): InferenceSnippet[] => {
46
- if (model.tags.includes("conversational")) {
47
- const baseUrl =
48
- provider === "hf-inference"
49
- ? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions`
50
- : HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
51
- const modelId = providerModelId ?? model.id;
52
-
53
- // Conversational model detected, so we display a code snippet that features the Messages API
54
- const streaming = opts?.streaming ?? true;
55
- const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
56
- const messages = opts?.messages ?? exampleMessages;
57
-
58
- const config = {
59
- ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
60
- max_tokens: opts?.max_tokens ?? 500,
61
- ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
62
- };
63
- return [
64
- {
65
- client: "curl",
66
- content: `curl '${baseUrl}' \\
67
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
68
- -H 'Content-Type: application/json' \\
69
- --data '{
70
- "model": "${modelId}",
71
- "messages": ${stringifyMessages(messages, {
72
- indent: "\t",
73
- attributeKeyQuotes: true,
74
- customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
75
- })},
76
- ${stringifyGenerationConfig(config, {
77
- indent: "\n ",
78
- attributeKeyQuotes: true,
79
- attributeValueConnector: ": ",
80
- })}
81
- "stream": ${!!streaming}
82
- }'`,
83
- },
84
- ];
85
- } else {
86
- return snippetBasic(model, accessToken, provider);
87
- }
88
- };
89
-
90
- export const snippetZeroShotClassification = (
91
- model: ModelDataMinimal,
92
- accessToken: string,
93
- provider: SnippetInferenceProvider
94
- ): InferenceSnippet[] => {
95
- if (provider !== "hf-inference") {
96
- return [];
97
- }
98
- return [
99
- {
100
- client: "curl",
101
- content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
102
- -X POST \\
103
- -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
104
- -H 'Content-Type: application/json' \\
105
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
106
- },
107
- ];
108
- };
109
-
110
- export const snippetFile = (
111
- model: ModelDataMinimal,
112
- accessToken: string,
113
- provider: SnippetInferenceProvider
114
- ): InferenceSnippet[] => {
115
- if (provider !== "hf-inference") {
116
- return [];
117
- }
118
- return [
119
- {
120
- client: "curl",
121
- content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
122
- -X POST \\
123
- --data-binary '@${getModelInputSnippet(model, true, true)}' \\
124
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
125
- },
126
- ];
127
- };
128
-
129
- export const curlSnippets: Partial<
130
- Record<
131
- PipelineType,
132
- (
133
- model: ModelDataMinimal,
134
- accessToken: string,
135
- provider: SnippetInferenceProvider,
136
- providerModelId?: string,
137
- opts?: Record<string, unknown>
138
- ) => InferenceSnippet[]
139
- >
140
- > = {
141
- // Same order as in tasks/src/pipelines.ts
142
- "text-classification": snippetBasic,
143
- "token-classification": snippetBasic,
144
- "table-question-answering": snippetBasic,
145
- "question-answering": snippetBasic,
146
- "zero-shot-classification": snippetZeroShotClassification,
147
- translation: snippetBasic,
148
- summarization: snippetBasic,
149
- "feature-extraction": snippetBasic,
150
- "text-generation": snippetTextGeneration,
151
- "image-text-to-text": snippetTextGeneration,
152
- "text2text-generation": snippetBasic,
153
- "fill-mask": snippetBasic,
154
- "sentence-similarity": snippetBasic,
155
- "automatic-speech-recognition": snippetFile,
156
- "text-to-image": snippetBasic,
157
- "text-to-speech": snippetBasic,
158
- "text-to-audio": snippetBasic,
159
- "audio-to-audio": snippetFile,
160
- "audio-classification": snippetFile,
161
- "image-classification": snippetFile,
162
- "image-to-text": snippetFile,
163
- "object-detection": snippetFile,
164
- "image-segmentation": snippetFile,
165
- };
166
-
167
- export function getCurlInferenceSnippet(
168
- model: ModelDataMinimal,
169
- accessToken: string,
170
- provider: SnippetInferenceProvider,
171
- providerModelId?: string,
172
- opts?: Record<string, unknown>
173
- ): InferenceSnippet[] {
174
- return model.pipeline_tag && model.pipeline_tag in curlSnippets
175
- ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
176
- : [];
177
- }
@@ -1,475 +0,0 @@
1
- import { openAIbaseUrl, type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4
- import {
5
- type InferenceSnippet,
6
- type ModelDataMinimal,
7
- getModelInputSnippet,
8
- stringifyGenerationConfig,
9
- stringifyMessages,
10
- } from "@huggingface/tasks";
11
-
12
- const HFJS_METHODS: Partial<Record<WidgetType, string>> = {
13
- "text-classification": "textClassification",
14
- "token-classification": "tokenClassification",
15
- "table-question-answering": "tableQuestionAnswering",
16
- "question-answering": "questionAnswering",
17
- translation: "translation",
18
- summarization: "summarization",
19
- "feature-extraction": "featureExtraction",
20
- "text-generation": "textGeneration",
21
- "text2text-generation": "textGeneration",
22
- "fill-mask": "fillMask",
23
- "sentence-similarity": "sentenceSimilarity",
24
- };
25
-
26
- export const snippetBasic = (
27
- model: ModelDataMinimal,
28
- accessToken: string,
29
- provider: SnippetInferenceProvider
30
- ): InferenceSnippet[] => {
31
- return [
32
- ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
33
- ? [
34
- {
35
- client: "huggingface.js",
36
- content: `\
37
- import { InferenceClient } from "@huggingface/inference";
38
-
39
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
40
-
41
- const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
42
- model: "${model.id}",
43
- inputs: ${getModelInputSnippet(model)},
44
- provider: "${provider}",
45
- });
46
-
47
- console.log(output);
48
- `,
49
- },
50
- ]
51
- : []),
52
- {
53
- client: "fetch",
54
- content: `\
55
- async function query(data) {
56
- const response = await fetch(
57
- "https://router.huggingface.co/hf-inference/models/${model.id}",
58
- {
59
- headers: {
60
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
61
- "Content-Type": "application/json",
62
- },
63
- method: "POST",
64
- body: JSON.stringify(data),
65
- }
66
- );
67
- const result = await response.json();
68
- return result;
69
- }
70
-
71
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
72
- console.log(JSON.stringify(response));
73
- });`,
74
- },
75
- ];
76
- };
77
-
78
- export const snippetTextGeneration = (
79
- model: ModelDataMinimal,
80
- accessToken: string,
81
- provider: SnippetInferenceProvider,
82
- providerModelId?: string,
83
- opts?: {
84
- streaming?: boolean;
85
- messages?: ChatCompletionInputMessage[];
86
- temperature?: GenerationParameters["temperature"];
87
- max_tokens?: GenerationParameters["max_tokens"];
88
- top_p?: GenerationParameters["top_p"];
89
- }
90
- ): InferenceSnippet[] => {
91
- if (model.tags.includes("conversational")) {
92
- // Conversational model detected, so we display a code snippet that features the Messages API
93
- const streaming = opts?.streaming ?? true;
94
- const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
95
- const messages = opts?.messages ?? exampleMessages;
96
- const messagesStr = stringifyMessages(messages, { indent: "\t" });
97
-
98
- const config = {
99
- ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
100
- max_tokens: opts?.max_tokens ?? 500,
101
- ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
102
- };
103
- const configStr = stringifyGenerationConfig(config, {
104
- indent: "\n\t",
105
- attributeValueConnector: ": ",
106
- });
107
-
108
- if (streaming) {
109
- return [
110
- {
111
- client: "huggingface.js",
112
- content: `import { InferenceClient } from "@huggingface/inference";
113
-
114
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
115
-
116
- let out = "";
117
-
118
- const stream = client.chatCompletionStream({
119
- model: "${model.id}",
120
- messages: ${messagesStr},
121
- provider: "${provider}",
122
- ${configStr}
123
- });
124
-
125
- for await (const chunk of stream) {
126
- if (chunk.choices && chunk.choices.length > 0) {
127
- const newContent = chunk.choices[0].delta.content;
128
- out += newContent;
129
- console.log(newContent);
130
- }
131
- }`,
132
- },
133
- {
134
- client: "openai",
135
- content: `import { OpenAI } from "openai";
136
-
137
- const client = new OpenAI({
138
- baseURL: "${openAIbaseUrl(provider)}",
139
- apiKey: "${accessToken || `{API_TOKEN}`}"
140
- });
141
-
142
- let out = "";
143
-
144
- const stream = await client.chat.completions.create({
145
- model: "${providerModelId ?? model.id}",
146
- messages: ${messagesStr},
147
- ${configStr}
148
- stream: true,
149
- });
150
-
151
- for await (const chunk of stream) {
152
- if (chunk.choices && chunk.choices.length > 0) {
153
- const newContent = chunk.choices[0].delta.content;
154
- out += newContent;
155
- console.log(newContent);
156
- }
157
- }`,
158
- },
159
- ];
160
- } else {
161
- return [
162
- {
163
- client: "huggingface.js",
164
- content: `import { InferenceClient } from "@huggingface/inference";
165
-
166
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
167
-
168
- const chatCompletion = await client.chatCompletion({
169
- model: "${model.id}",
170
- messages: ${messagesStr},
171
- provider: "${provider}",
172
- ${configStr}
173
- });
174
-
175
- console.log(chatCompletion.choices[0].message);
176
- `,
177
- },
178
- {
179
- client: "openai",
180
- content: `import { OpenAI } from "openai";
181
-
182
- const client = new OpenAI({
183
- baseURL: "${openAIbaseUrl(provider)}",
184
- apiKey: "${accessToken || `{API_TOKEN}`}"
185
- });
186
-
187
- const chatCompletion = await client.chat.completions.create({
188
- model: "${providerModelId ?? model.id}",
189
- messages: ${messagesStr},
190
- ${configStr}
191
- });
192
-
193
- console.log(chatCompletion.choices[0].message);
194
- `,
195
- },
196
- ];
197
- }
198
- } else {
199
- return snippetBasic(model, accessToken, provider);
200
- }
201
- };
202
-
203
- export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): InferenceSnippet[] => {
204
- return [
205
- {
206
- client: "fetch",
207
- content: `async function query(data) {
208
- const response = await fetch(
209
- "https://router.huggingface.co/hf-inference/models/${model.id}",
210
- {
211
- headers: {
212
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
213
- "Content-Type": "application/json",
214
- },
215
- method: "POST",
216
- body: JSON.stringify(data),
217
- }
218
- );
219
- const result = await response.json();
220
- return result;
221
- }
222
-
223
- query({"inputs": ${getModelInputSnippet(
224
- model
225
- )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
226
- console.log(JSON.stringify(response));
227
- });`,
228
- },
229
- ];
230
- };
231
-
232
- export const snippetTextToImage = (
233
- model: ModelDataMinimal,
234
- accessToken: string,
235
- provider: SnippetInferenceProvider
236
- ): InferenceSnippet[] => {
237
- return [
238
- {
239
- client: "huggingface.js",
240
- content: `\
241
- import { InferenceClient } from "@huggingface/inference";
242
-
243
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
244
-
245
- const image = await client.textToImage({
246
- model: "${model.id}",
247
- inputs: ${getModelInputSnippet(model)},
248
- parameters: { num_inference_steps: 5 },
249
- provider: "${provider}",
250
- });
251
- /// Use the generated image (it's a Blob)
252
- `,
253
- },
254
- ...(provider === "hf-inference"
255
- ? [
256
- {
257
- client: "fetch",
258
- content: `async function query(data) {
259
- const response = await fetch(
260
- "https://router.huggingface.co/hf-inference/models/${model.id}",
261
- {
262
- headers: {
263
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
264
- "Content-Type": "application/json",
265
- },
266
- method: "POST",
267
- body: JSON.stringify(data),
268
- }
269
- );
270
- const result = await response.blob();
271
- return result;
272
- }
273
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
274
- // Use image
275
- });`,
276
- },
277
- ]
278
- : []),
279
- ];
280
- };
281
-
282
- export const snippetTextToVideo = (
283
- model: ModelDataMinimal,
284
- accessToken: string,
285
- provider: SnippetInferenceProvider
286
- ): InferenceSnippet[] => {
287
- return ["fal-ai", "replicate"].includes(provider)
288
- ? [
289
- {
290
- client: "huggingface.js",
291
- content: `\
292
- import { InferenceClient } from "@huggingface/inference";
293
-
294
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
295
-
296
- const video = await client.textToVideo({
297
- model: "${model.id}",
298
- provider: "${provider}",
299
- inputs: ${getModelInputSnippet(model)},
300
- parameters: { num_inference_steps: 5 },
301
- });
302
- // Use the generated video (it's a Blob)
303
- `,
304
- },
305
- ]
306
- : [];
307
- };
308
-
309
- export const snippetTextToAudio = (
310
- model: ModelDataMinimal,
311
- accessToken: string,
312
- provider: SnippetInferenceProvider
313
- ): InferenceSnippet[] => {
314
- if (provider !== "hf-inference") {
315
- return [];
316
- }
317
- const commonSnippet = `async function query(data) {
318
- const response = await fetch(
319
- "https://router.huggingface.co/hf-inference/models/${model.id}",
320
- {
321
- headers: {
322
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
323
- "Content-Type": "application/json",
324
- },
325
- method: "POST",
326
- body: JSON.stringify(data),
327
- }
328
- );`;
329
- if (model.library_name === "transformers") {
330
- return [
331
- {
332
- client: "fetch",
333
- content:
334
- commonSnippet +
335
- `
336
- const result = await response.blob();
337
- return result;
338
- }
339
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
340
- // Returns a byte object of the Audio wavform. Use it directly!
341
- });`,
342
- },
343
- ];
344
- } else {
345
- return [
346
- {
347
- client: "fetch",
348
- content:
349
- commonSnippet +
350
- `
351
- const result = await response.json();
352
- return result;
353
- }
354
-
355
- query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
356
- console.log(JSON.stringify(response));
357
- });`,
358
- },
359
- ];
360
- }
361
- };
362
-
363
- export const snippetAutomaticSpeechRecognition = (
364
- model: ModelDataMinimal,
365
- accessToken: string,
366
- provider: SnippetInferenceProvider
367
- ): InferenceSnippet[] => {
368
- return [
369
- {
370
- client: "huggingface.js",
371
- content: `\
372
- import { InferenceClient } from "@huggingface/inference";
373
-
374
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
375
-
376
- const data = fs.readFileSync(${getModelInputSnippet(model)});
377
-
378
- const output = await client.automaticSpeechRecognition({
379
- data,
380
- model: "${model.id}",
381
- provider: "${provider}",
382
- });
383
-
384
- console.log(output);
385
- `,
386
- },
387
- ...(provider === "hf-inference" ? snippetFile(model, accessToken, provider) : []),
388
- ];
389
- };
390
-
391
- export const snippetFile = (
392
- model: ModelDataMinimal,
393
- accessToken: string,
394
- provider: SnippetInferenceProvider
395
- ): InferenceSnippet[] => {
396
- if (provider !== "hf-inference") {
397
- return [];
398
- }
399
- return [
400
- {
401
- client: "fetch",
402
- content: `async function query(filename) {
403
- const data = fs.readFileSync(filename);
404
- const response = await fetch(
405
- "https://router.huggingface.co/hf-inference/models/${model.id}",
406
- {
407
- headers: {
408
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
409
- "Content-Type": "application/json",
410
- },
411
- method: "POST",
412
- body: data,
413
- }
414
- );
415
- const result = await response.json();
416
- return result;
417
- }
418
-
419
- query(${getModelInputSnippet(model)}).then((response) => {
420
- console.log(JSON.stringify(response));
421
- });`,
422
- },
423
- ];
424
- };
425
-
426
- export const jsSnippets: Partial<
427
- Record<
428
- PipelineType,
429
- (
430
- model: ModelDataMinimal,
431
- accessToken: string,
432
- provider: SnippetInferenceProvider,
433
- providerModelId?: string,
434
- opts?: Record<string, unknown>
435
- ) => InferenceSnippet[]
436
- >
437
- > = {
438
- // Same order as in tasks/src/pipelines.ts
439
- "text-classification": snippetBasic,
440
- "token-classification": snippetBasic,
441
- "table-question-answering": snippetBasic,
442
- "question-answering": snippetBasic,
443
- "zero-shot-classification": snippetZeroShotClassification,
444
- translation: snippetBasic,
445
- summarization: snippetBasic,
446
- "feature-extraction": snippetBasic,
447
- "text-generation": snippetTextGeneration,
448
- "image-text-to-text": snippetTextGeneration,
449
- "text2text-generation": snippetBasic,
450
- "fill-mask": snippetBasic,
451
- "sentence-similarity": snippetBasic,
452
- "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
453
- "text-to-image": snippetTextToImage,
454
- "text-to-video": snippetTextToVideo,
455
- "text-to-speech": snippetTextToAudio,
456
- "text-to-audio": snippetTextToAudio,
457
- "audio-to-audio": snippetFile,
458
- "audio-classification": snippetFile,
459
- "image-classification": snippetFile,
460
- "image-to-text": snippetFile,
461
- "object-detection": snippetFile,
462
- "image-segmentation": snippetFile,
463
- };
464
-
465
- export function getJsInferenceSnippet(
466
- model: ModelDataMinimal,
467
- accessToken: string,
468
- provider: SnippetInferenceProvider,
469
- providerModelId?: string,
470
- opts?: Record<string, unknown>
471
- ): InferenceSnippet[] {
472
- return model.pipeline_tag && model.pipeline_tag in jsSnippets
473
- ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
474
- : [];
475
- }