@huggingface/tasks 0.10.20 → 0.10.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -90,6 +90,7 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
90
90
  repoUrl: "https://github.com/facebookresearch/audiocraft",
91
91
  snippets: snippets.audiocraft,
92
92
  filter: false,
93
+ countDownloads: `path:"state_dict.bin"`,
93
94
  },
94
95
  audioseal: {
95
96
  prettyLabel: "AudioSeal",
@@ -241,6 +242,16 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
241
242
  docsUrl: "https://huggingface.co/docs/hub/keras",
242
243
  snippets: snippets.keras,
243
244
  filter: true,
245
+ countDownloads: `path:"config.json" OR path_extension:"keras"`,
246
+ },
247
+ "tf-keras": {
248
+ // Legacy "Keras 2" library (tensorflow-only)
249
+ prettyLabel: "TF-Keras",
250
+ repoName: "TF-Keras",
251
+ repoUrl: "https://github.com/keras-team/tf-keras",
252
+ docsUrl: "https://huggingface.co/docs/hub/tf-keras",
253
+ snippets: snippets.tf_keras,
254
+ filter: true,
244
255
  countDownloads: `path:"saved_model.pb"`,
245
256
  },
246
257
  "keras-nlp": {
@@ -260,6 +271,14 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
260
271
  repoName: "mindspore",
261
272
  repoUrl: "https://github.com/mindspore-ai/mindspore",
262
273
  },
274
+ "mars5-tts": {
275
+ prettyLabel: "MARS5-TTS",
276
+ repoName: "MARS5-TTS",
277
+ repoUrl: "https://github.com/Camb-ai/MARS5-TTS",
278
+ filter: false,
279
+ countDownloads: `path:"mars5_ar.safetensors"`,
280
+ snippets: snippets.mars5_tts,
281
+ },
263
282
  "ml-agents": {
264
283
  prettyLabel: "ml-agents",
265
284
  repoName: "ml-agents",
@@ -495,6 +514,8 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
495
514
  prettyLabel: "WhisperKit",
496
515
  repoName: "WhisperKit",
497
516
  repoUrl: "https://github.com/argmaxinc/WhisperKit",
517
+ docsUrl: "https://github.com/argmaxinc/WhisperKit?tab=readme-ov-file#homebrew",
518
+ snippets: snippets.whisperkit,
498
519
  countDownloads: `path_filename:"model" AND path_extension:"mil" AND _exists_:"path_prefix"`,
499
520
  },
500
521
  } satisfies Record<string, LibraryUiElement>;
@@ -10,6 +10,24 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
10
10
  -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
11
11
  `;
12
12
 
13
+ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
14
+ if (model.config?.tokenizer_config?.chat_template) {
15
+ // Conversational model detected, so we display a code snippet that features the Messages API
16
+ return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
17
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
18
+ -H 'Content-Type: application/json' \\
19
+ -d '{
20
+ "model": "${model.id}",
21
+ "messages": [{"role": "user", "content": "What is the capital of France?"}],
22
+ "max_tokens": 500,
23
+ "stream": false
24
+ }'
25
+ `;
26
+ } else {
27
+ return snippetBasic(model, accessToken);
28
+ }
29
+ };
30
+
13
31
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
14
32
  `curl https://api-inference.huggingface.co/models/${model.id} \\
15
33
  -X POST \\
@@ -35,7 +53,7 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
35
53
  translation: snippetBasic,
36
54
  summarization: snippetBasic,
37
55
  "feature-extraction": snippetBasic,
38
- "text-generation": snippetBasic,
56
+ "text-generation": snippetTextGeneration,
39
57
  "text2text-generation": snippetBasic,
40
58
  "fill-mask": snippetBasic,
41
59
  "sentence-similarity": snippetBasic,
@@ -11,30 +11,30 @@ const inputsSummarization = () =>
11
11
 
12
12
  const inputsTableQuestionAnswering = () =>
13
13
  `{
14
- "query": "How many stars does the transformers repository have?",
15
- "table": {
16
- "Repository": ["Transformers", "Datasets", "Tokenizers"],
17
- "Stars": ["36542", "4512", "3934"],
18
- "Contributors": ["651", "77", "34"],
19
- "Programming language": [
20
- "Python",
21
- "Python",
22
- "Rust, Python and NodeJS"
23
- ]
24
- }
25
- }`;
14
+ "query": "How many stars does the transformers repository have?",
15
+ "table": {
16
+ "Repository": ["Transformers", "Datasets", "Tokenizers"],
17
+ "Stars": ["36542", "4512", "3934"],
18
+ "Contributors": ["651", "77", "34"],
19
+ "Programming language": [
20
+ "Python",
21
+ "Python",
22
+ "Rust, Python and NodeJS"
23
+ ]
24
+ }
25
+ }`;
26
26
 
27
27
  const inputsVisualQuestionAnswering = () =>
28
28
  `{
29
- "image": "cat.png",
30
- "question": "What is in this image?"
31
- }`;
29
+ "image": "cat.png",
30
+ "question": "What is in this image?"
31
+ }`;
32
32
 
33
33
  const inputsQuestionAnswering = () =>
34
34
  `{
35
- "question": "What is my name?",
36
- "context": "My name is Clara and I live in Berkeley."
37
- }`;
35
+ "question": "What is my name?",
36
+ "context": "My name is Clara and I live in Berkeley."
37
+ }`;
38
38
 
39
39
  const inputsTextClassification = () => `"I like you. I love you"`;
40
40
 
@@ -48,13 +48,13 @@ const inputsFillMask = (model: ModelDataMinimal) => `"The answer to the universe
48
48
 
49
49
  const inputsSentenceSimilarity = () =>
50
50
  `{
51
- "source_sentence": "That is a happy person",
52
- "sentences": [
53
- "That is a happy dog",
54
- "That is a very happy person",
55
- "Today is a sunny day"
56
- ]
57
- }`;
51
+ "source_sentence": "That is a happy person",
52
+ "sentences": [
53
+ "That is a happy dog",
54
+ "That is a very happy person",
55
+ "Today is a sunny day"
56
+ ]
57
+ }`;
58
58
 
59
59
  const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
60
60
 
@@ -7,7 +7,10 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
7
7
  const response = await fetch(
8
8
  "https://api-inference.huggingface.co/models/${model.id}",
9
9
  {
10
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
10
+ headers: {
11
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
12
+ "Content-Type": "application/json",
13
+ },
11
14
  method: "POST",
12
15
  body: JSON.stringify(data),
13
16
  }
@@ -20,12 +23,34 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
20
23
  console.log(JSON.stringify(response));
21
24
  });`;
22
25
 
26
+ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
27
+ if (model.config?.tokenizer_config?.chat_template) {
28
+ // Conversational model detected, so we display a code snippet that features the Messages API
29
+ return `import { HfInference } from "@huggingface/inference";
30
+
31
+ const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
32
+
33
+ for await (const chunk of inference.chatCompletionStream({
34
+ model: "${model.id}",
35
+ messages: [{ role: "user", content: "What is the capital of France?" }],
36
+ max_tokens: 500,
37
+ })) {
38
+ process.stdout.write(chunk.choices[0]?.delta?.content || "");
39
+ }
40
+ `;
41
+ } else {
42
+ return snippetBasic(model, accessToken);
43
+ }
44
+ };
23
45
  export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
24
46
  `async function query(data) {
25
47
  const response = await fetch(
26
48
  "https://api-inference.huggingface.co/models/${model.id}",
27
49
  {
28
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
50
+ headers: {
51
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
52
+ "Content-Type": "application/json",
53
+ },
29
54
  method: "POST",
30
55
  body: JSON.stringify(data),
31
56
  }
@@ -45,7 +70,10 @@ export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string)
45
70
  const response = await fetch(
46
71
  "https://api-inference.huggingface.co/models/${model.id}",
47
72
  {
48
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
73
+ headers: {
74
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
75
+ "Content-Type": "application/json",
76
+ },
49
77
  method: "POST",
50
78
  body: JSON.stringify(data),
51
79
  }
@@ -62,7 +90,10 @@ export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string)
62
90
  const response = await fetch(
63
91
  "https://api-inference.huggingface.co/models/${model.id}",
64
92
  {
65
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
93
+ headers: {
94
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
95
+ "Content-Type": "application/json",
96
+ },
66
97
  method: "POST",
67
98
  body: JSON.stringify(data),
68
99
  }
@@ -99,7 +130,10 @@ export const snippetFile = (model: ModelDataMinimal, accessToken: string): strin
99
130
  const response = await fetch(
100
131
  "https://api-inference.huggingface.co/models/${model.id}",
101
132
  {
102
- headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
133
+ headers: {
134
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
135
+ "Content-Type": "application/json",
136
+ },
103
137
  method: "POST",
104
138
  body: data,
105
139
  }
@@ -122,7 +156,7 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal,
122
156
  translation: snippetBasic,
123
157
  summarization: snippetBasic,
124
158
  "feature-extraction": snippetBasic,
125
- "text-generation": snippetBasic,
159
+ "text-generation": snippetTextGeneration,
126
160
  "text2text-generation": snippetBasic,
127
161
  "fill-mask": snippetBasic,
128
162
  "sentence-similarity": snippetBasic,
@@ -2,6 +2,22 @@ import type { PipelineType } from "../pipelines.js";
2
2
  import { getModelInputSnippet } from "./inputs.js";
3
3
  import type { ModelDataMinimal } from "./types.js";
4
4
 
5
+ export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string =>
6
+ `from huggingface_hub import InferenceClient
7
+
8
+ client = InferenceClient(
9
+ "${model.id}",
10
+ token="${accessToken || "{API_TOKEN}"}",
11
+ )
12
+
13
+ for message in client.chat_completion(
14
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
15
+ max_tokens=500,
16
+ stream=True,
17
+ ):
18
+ print(message.choices[0].delta.content, end="")
19
+ `;
20
+
5
21
  export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
6
22
  `def query(payload):
7
23
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -107,7 +123,7 @@ output = query({
107
123
  "inputs": ${getModelInputSnippet(model)},
108
124
  })`;
109
125
 
110
- export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal) => string>> = {
126
+ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>> = {
111
127
  // Same order as in tasks/src/pipelines.ts
112
128
  "text-classification": snippetBasic,
113
129
  "token-classification": snippetBasic,
@@ -138,15 +154,22 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinim
138
154
  };
139
155
 
140
156
  export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
141
- const body =
142
- model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
157
+ if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
158
+ // Conversational model detected, so we display a code snippet that features the Messages API
159
+ return snippetConversational(model, accessToken);
160
+ } else {
161
+ const body =
162
+ model.pipeline_tag && model.pipeline_tag in pythonSnippets
163
+ ? pythonSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
164
+ : "";
143
165
 
144
- return `import requests
166
+ return `import requests
145
167
 
146
168
  API_URL = "https://api-inference.huggingface.co/models/${model.id}"
147
169
  headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
148
170
 
149
171
  ${body}`;
172
+ }
150
173
  }
151
174
 
152
175
  export function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean {
@@ -5,4 +5,4 @@ import type { ModelData } from "../model-data";
5
5
  *
6
6
  * Add more fields as needed.
7
7
  */
8
- export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name">;
8
+ export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;