@huggingface/tasks 0.16.7 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36):
  1. package/dist/commonjs/index.d.ts +1 -3
  2. package/dist/commonjs/index.d.ts.map +1 -1
  3. package/dist/commonjs/index.js +5 -15
  4. package/dist/commonjs/snippets/index.d.ts +2 -5
  5. package/dist/commonjs/snippets/index.d.ts.map +1 -1
  6. package/dist/commonjs/snippets/index.js +2 -21
  7. package/dist/esm/index.d.ts +1 -3
  8. package/dist/esm/index.d.ts.map +1 -1
  9. package/dist/esm/index.js +1 -2
  10. package/dist/esm/snippets/index.d.ts +2 -5
  11. package/dist/esm/snippets/index.d.ts.map +1 -1
  12. package/dist/esm/snippets/index.js +2 -5
  13. package/package.json +1 -1
  14. package/src/index.ts +7 -3
  15. package/src/snippets/index.ts +2 -6
  16. package/dist/commonjs/snippets/curl.d.ts +0 -17
  17. package/dist/commonjs/snippets/curl.d.ts.map +0 -1
  18. package/dist/commonjs/snippets/curl.js +0 -129
  19. package/dist/commonjs/snippets/js.d.ts +0 -21
  20. package/dist/commonjs/snippets/js.d.ts.map +0 -1
  21. package/dist/commonjs/snippets/js.js +0 -413
  22. package/dist/commonjs/snippets/python.d.ts +0 -23
  23. package/dist/commonjs/snippets/python.d.ts.map +0 -1
  24. package/dist/commonjs/snippets/python.js +0 -435
  25. package/dist/esm/snippets/curl.d.ts +0 -17
  26. package/dist/esm/snippets/curl.d.ts.map +0 -1
  27. package/dist/esm/snippets/curl.js +0 -121
  28. package/dist/esm/snippets/js.d.ts +0 -21
  29. package/dist/esm/snippets/js.d.ts.map +0 -1
  30. package/dist/esm/snippets/js.js +0 -401
  31. package/dist/esm/snippets/python.d.ts +0 -23
  32. package/dist/esm/snippets/python.d.ts.map +0 -1
  33. package/dist/esm/snippets/python.js +0 -421
  34. package/src/snippets/curl.ts +0 -173
  35. package/src/snippets/js.ts +0 -471
  36. package/src/snippets/python.ts +0 -483
@@ -1,483 +0,0 @@
1
- import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
2
- import type { PipelineType, WidgetType } from "../pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4
- import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5
- import { getModelInputSnippet } from "./inputs.js";
6
- import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
7
-
8
- const HFH_INFERENCE_CLIENT_METHODS: Partial<Record<WidgetType, string>> = {
9
- "audio-classification": "audio_classification",
10
- "audio-to-audio": "audio_to_audio",
11
- "automatic-speech-recognition": "automatic_speech_recognition",
12
- "text-to-speech": "text_to_speech",
13
- "image-classification": "image_classification",
14
- "image-segmentation": "image_segmentation",
15
- "image-to-image": "image_to_image",
16
- "image-to-text": "image_to_text",
17
- "object-detection": "object_detection",
18
- "text-to-image": "text_to_image",
19
- "text-to-video": "text_to_video",
20
- "zero-shot-image-classification": "zero_shot_image_classification",
21
- "document-question-answering": "document_question_answering",
22
- "visual-question-answering": "visual_question_answering",
23
- "feature-extraction": "feature_extraction",
24
- "fill-mask": "fill_mask",
25
- "question-answering": "question_answering",
26
- "sentence-similarity": "sentence_similarity",
27
- summarization: "summarization",
28
- "table-question-answering": "table_question_answering",
29
- "text-classification": "text_classification",
30
- "text-generation": "text_generation",
31
- "token-classification": "token_classification",
32
- translation: "translation",
33
- "zero-shot-classification": "zero_shot_classification",
34
- "tabular-classification": "tabular_classification",
35
- "tabular-regression": "tabular_regression",
36
- };
37
-
38
- const snippetImportInferenceClient = (accessToken: string, provider: SnippetInferenceProvider): string =>
39
- `\
40
- from huggingface_hub import InferenceClient
41
-
42
- client = InferenceClient(
43
- provider="${provider}",
44
- api_key="${accessToken || "{API_TOKEN}"}"
45
- )`;
46
-
47
- export const snippetConversational = (
48
- model: ModelDataMinimal,
49
- accessToken: string,
50
- provider: SnippetInferenceProvider,
51
- providerModelId?: string,
52
- opts?: {
53
- streaming?: boolean;
54
- messages?: ChatCompletionInputMessage[];
55
- temperature?: GenerationParameters["temperature"];
56
- max_tokens?: GenerationParameters["max_tokens"];
57
- top_p?: GenerationParameters["top_p"];
58
- }
59
- ): InferenceSnippet[] => {
60
- const streaming = opts?.streaming ?? true;
61
- const exampleMessages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
62
- const messages = opts?.messages ?? exampleMessages;
63
- const messagesStr = stringifyMessages(messages, { attributeKeyQuotes: true });
64
-
65
- const config = {
66
- ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
67
- max_tokens: opts?.max_tokens ?? 500,
68
- ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
69
- };
70
- const configStr = stringifyGenerationConfig(config, {
71
- indent: "\n\t",
72
- attributeValueConnector: "=",
73
- });
74
-
75
- if (streaming) {
76
- return [
77
- {
78
- client: "huggingface_hub",
79
- content: `\
80
- ${snippetImportInferenceClient(accessToken, provider)}
81
-
82
- messages = ${messagesStr}
83
-
84
- stream = client.chat.completions.create(
85
- model="${model.id}",
86
- messages=messages,
87
- ${configStr}
88
- stream=True
89
- )
90
-
91
- for chunk in stream:
92
- print(chunk.choices[0].delta.content, end="")`,
93
- },
94
- {
95
- client: "openai",
96
- content: `\
97
- from openai import OpenAI
98
-
99
- client = OpenAI(
100
- base_url="${openAIbaseUrl(provider)}",
101
- api_key="${accessToken || "{API_TOKEN}"}"
102
- )
103
-
104
- messages = ${messagesStr}
105
-
106
- stream = client.chat.completions.create(
107
- model="${providerModelId ?? model.id}",
108
- messages=messages,
109
- ${configStr}
110
- stream=True
111
- )
112
-
113
- for chunk in stream:
114
- print(chunk.choices[0].delta.content, end="")`,
115
- },
116
- ];
117
- } else {
118
- return [
119
- {
120
- client: "huggingface_hub",
121
- content: `\
122
- ${snippetImportInferenceClient(accessToken, provider)}
123
-
124
- messages = ${messagesStr}
125
-
126
- completion = client.chat.completions.create(
127
- model="${model.id}",
128
- messages=messages,
129
- ${configStr}
130
- )
131
-
132
- print(completion.choices[0].message)`,
133
- },
134
- {
135
- client: "openai",
136
- content: `\
137
- from openai import OpenAI
138
-
139
- client = OpenAI(
140
- base_url="${openAIbaseUrl(provider)}",
141
- api_key="${accessToken || "{API_TOKEN}"}"
142
- )
143
-
144
- messages = ${messagesStr}
145
-
146
- completion = client.chat.completions.create(
147
- model="${providerModelId ?? model.id}",
148
- messages=messages,
149
- ${configStr}
150
- )
151
-
152
- print(completion.choices[0].message)`,
153
- },
154
- ];
155
- }
156
- };
157
-
158
- export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
159
- return [
160
- {
161
- client: "requests",
162
- content: `\
163
- def query(payload):
164
- response = requests.post(API_URL, headers=headers, json=payload)
165
- return response.json()
166
-
167
- output = query({
168
- "inputs": ${getModelInputSnippet(model)},
169
- "parameters": {"candidate_labels": ["refund", "legal", "faq"]},
170
- })`,
171
- },
172
- ];
173
- };
174
-
175
- export const snippetZeroShotImageClassification = (model: ModelDataMinimal): InferenceSnippet[] => {
176
- return [
177
- {
178
- client: "requests",
179
- content: `\
180
- def query(data):
181
- with open(data["image_path"], "rb") as f:
182
- img = f.read()
183
- payload={
184
- "parameters": data["parameters"],
185
- "inputs": base64.b64encode(img).decode("utf-8")
186
- }
187
- response = requests.post(API_URL, headers=headers, json=payload)
188
- return response.json()
189
-
190
- output = query({
191
- "image_path": ${getModelInputSnippet(model)},
192
- "parameters": {"candidate_labels": ["cat", "dog", "llama"]},
193
- })`,
194
- },
195
- ];
196
- };
197
-
198
- export const snippetBasic = (
199
- model: ModelDataMinimal,
200
- accessToken: string,
201
- provider: SnippetInferenceProvider
202
- ): InferenceSnippet[] => {
203
- return [
204
- ...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
205
- ? [
206
- {
207
- client: "huggingface_hub",
208
- content: `\
209
- ${snippetImportInferenceClient(accessToken, provider)}
210
-
211
- result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
212
- model="${model.id}",
213
- inputs=${getModelInputSnippet(model)},
214
- provider="${provider}",
215
- )
216
-
217
- print(result)
218
- `,
219
- },
220
- ]
221
- : []),
222
- {
223
- client: "requests",
224
- content: `\
225
- def query(payload):
226
- response = requests.post(API_URL, headers=headers, json=payload)
227
- return response.json()
228
-
229
- output = query({
230
- "inputs": ${getModelInputSnippet(model)},
231
- })`,
232
- },
233
- ];
234
- };
235
-
236
- export const snippetFile = (model: ModelDataMinimal): InferenceSnippet[] => {
237
- return [
238
- {
239
- client: "requests",
240
- content: `\
241
- def query(filename):
242
- with open(filename, "rb") as f:
243
- data = f.read()
244
- response = requests.post(API_URL, headers=headers, data=data)
245
- return response.json()
246
-
247
- output = query(${getModelInputSnippet(model)})`,
248
- },
249
- ];
250
- };
251
-
252
- export const snippetTextToImage = (
253
- model: ModelDataMinimal,
254
- accessToken: string,
255
- provider: SnippetInferenceProvider,
256
- providerModelId?: string
257
- ): InferenceSnippet[] => {
258
- return [
259
- {
260
- client: "huggingface_hub",
261
- content: `\
262
- ${snippetImportInferenceClient(accessToken, provider)}
263
-
264
- # output is a PIL.Image object
265
- image = client.text_to_image(
266
- ${getModelInputSnippet(model)},
267
- model="${model.id}"
268
- )`,
269
- },
270
- ...(provider === "fal-ai"
271
- ? [
272
- {
273
- client: "fal-client",
274
- content: `\
275
- import fal_client
276
-
277
- result = fal_client.subscribe(
278
- "${providerModelId ?? model.id}",
279
- arguments={
280
- "prompt": ${getModelInputSnippet(model)},
281
- },
282
- )
283
- print(result)
284
- `,
285
- },
286
- ]
287
- : []),
288
- ...(provider === "hf-inference"
289
- ? [
290
- {
291
- client: "requests",
292
- content: `\
293
- def query(payload):
294
- response = requests.post(API_URL, headers=headers, json=payload)
295
- return response.content
296
-
297
- image_bytes = query({
298
- "inputs": ${getModelInputSnippet(model)},
299
- })
300
-
301
- # You can access the image with PIL.Image for example
302
- import io
303
- from PIL import Image
304
- image = Image.open(io.BytesIO(image_bytes))`,
305
- },
306
- ]
307
- : []),
308
- ];
309
- };
310
-
311
- export const snippetTextToVideo = (
312
- model: ModelDataMinimal,
313
- accessToken: string,
314
- provider: SnippetInferenceProvider
315
- ): InferenceSnippet[] => {
316
- return ["fal-ai", "replicate"].includes(provider)
317
- ? [
318
- {
319
- client: "huggingface_hub",
320
- content: `\
321
- ${snippetImportInferenceClient(accessToken, provider)}
322
-
323
- video = client.text_to_video(
324
- ${getModelInputSnippet(model)},
325
- model="${model.id}"
326
- )`,
327
- },
328
- ]
329
- : [];
330
- };
331
-
332
- export const snippetTabular = (model: ModelDataMinimal): InferenceSnippet[] => {
333
- return [
334
- {
335
- client: "requests",
336
- content: `\
337
- def query(payload):
338
- response = requests.post(API_URL, headers=headers, json=payload)
339
- return response.content
340
-
341
- response = query({
342
- "inputs": {"data": ${getModelInputSnippet(model)}},
343
- })`,
344
- },
345
- ];
346
- };
347
-
348
- export const snippetTextToAudio = (model: ModelDataMinimal): InferenceSnippet[] => {
349
- // Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged
350
- // with the latest update to inference-api (IA).
351
- // Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate.
352
- if (model.library_name === "transformers") {
353
- return [
354
- {
355
- client: "requests",
356
- content: `\
357
- def query(payload):
358
- response = requests.post(API_URL, headers=headers, json=payload)
359
- return response.content
360
-
361
- audio_bytes = query({
362
- "inputs": ${getModelInputSnippet(model)},
363
- })
364
- # You can access the audio with IPython.display for example
365
- from IPython.display import Audio
366
- Audio(audio_bytes)`,
367
- },
368
- ];
369
- } else {
370
- return [
371
- {
372
- client: "requests",
373
- content: `\
374
- def query(payload):
375
- response = requests.post(API_URL, headers=headers, json=payload)
376
- return response.json()
377
-
378
- audio, sampling_rate = query({
379
- "inputs": ${getModelInputSnippet(model)},
380
- })
381
- # You can access the audio with IPython.display for example
382
- from IPython.display import Audio
383
- Audio(audio, rate=sampling_rate)`,
384
- },
385
- ];
386
- }
387
- };
388
-
389
- export const snippetDocumentQuestionAnswering = (model: ModelDataMinimal): InferenceSnippet[] => {
390
- return [
391
- {
392
- client: "requests",
393
- content: `\
394
- def query(payload):
395
- with open(payload["image"], "rb") as f:
396
- img = f.read()
397
- payload["image"] = base64.b64encode(img).decode("utf-8")
398
- response = requests.post(API_URL, headers=headers, json=payload)
399
- return response.json()
400
-
401
- output = query({
402
- "inputs": ${getModelInputSnippet(model)},
403
- })`,
404
- },
405
- ];
406
- };
407
-
408
- export const pythonSnippets: Partial<
409
- Record<
410
- PipelineType,
411
- (
412
- model: ModelDataMinimal,
413
- accessToken: string,
414
- provider: SnippetInferenceProvider,
415
- providerModelId?: string,
416
- opts?: Record<string, unknown>
417
- ) => InferenceSnippet[]
418
- >
419
- > = {
420
- // Same order as in tasks/src/pipelines.ts
421
- "text-classification": snippetBasic,
422
- "token-classification": snippetBasic,
423
- "table-question-answering": snippetBasic,
424
- "question-answering": snippetBasic,
425
- "zero-shot-classification": snippetZeroShotClassification,
426
- translation: snippetBasic,
427
- summarization: snippetBasic,
428
- "feature-extraction": snippetBasic,
429
- "text-generation": snippetBasic,
430
- "text2text-generation": snippetBasic,
431
- "image-text-to-text": snippetConversational,
432
- "fill-mask": snippetBasic,
433
- "sentence-similarity": snippetBasic,
434
- "automatic-speech-recognition": snippetFile,
435
- "text-to-image": snippetTextToImage,
436
- "text-to-video": snippetTextToVideo,
437
- "text-to-speech": snippetTextToAudio,
438
- "text-to-audio": snippetTextToAudio,
439
- "audio-to-audio": snippetFile,
440
- "audio-classification": snippetFile,
441
- "image-classification": snippetFile,
442
- "tabular-regression": snippetTabular,
443
- "tabular-classification": snippetTabular,
444
- "object-detection": snippetFile,
445
- "image-segmentation": snippetFile,
446
- "document-question-answering": snippetDocumentQuestionAnswering,
447
- "image-to-text": snippetFile,
448
- "zero-shot-image-classification": snippetZeroShotImageClassification,
449
- };
450
-
451
- export function getPythonInferenceSnippet(
452
- model: ModelDataMinimal,
453
- accessToken: string,
454
- provider: SnippetInferenceProvider,
455
- providerModelId?: string,
456
- opts?: Record<string, unknown>
457
- ): InferenceSnippet[] {
458
- if (model.tags.includes("conversational")) {
459
- // Conversational model detected, so we display a code snippet that features the Messages API
460
- return snippetConversational(model, accessToken, provider, providerModelId, opts);
461
- } else {
462
- const snippets =
463
- model.pipeline_tag && model.pipeline_tag in pythonSnippets
464
- ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? []
465
- : [];
466
-
467
- return snippets.map((snippet) => {
468
- return {
469
- ...snippet,
470
- content:
471
- snippet.client === "requests"
472
- ? `\
473
- import requests
474
-
475
- API_URL = "${openAIbaseUrl(provider)}"
476
- headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
477
-
478
- ${snippet.content}`
479
- : snippet.content,
480
- };
481
- });
482
- }
483
- }