@huggingface/tasks 0.15.7 → 0.15.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/dist/commonjs/inference-providers.d.ts +5 -4
  2. package/dist/commonjs/inference-providers.d.ts.map +1 -1
  3. package/dist/commonjs/inference-providers.js +15 -6
  4. package/dist/commonjs/library-to-tasks.d.ts +1 -1
  5. package/dist/commonjs/library-to-tasks.js +1 -1
  6. package/dist/commonjs/model-libraries-snippets.d.ts +1 -0
  7. package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -1
  8. package/dist/commonjs/model-libraries-snippets.js +22 -8
  9. package/dist/commonjs/model-libraries.d.ts +8 -1
  10. package/dist/commonjs/model-libraries.d.ts.map +1 -1
  11. package/dist/commonjs/model-libraries.js +7 -0
  12. package/dist/commonjs/pipelines.d.ts +7 -1
  13. package/dist/commonjs/pipelines.d.ts.map +1 -1
  14. package/dist/commonjs/pipelines.js +6 -0
  15. package/dist/commonjs/snippets/common.js +2 -2
  16. package/dist/commonjs/snippets/curl.d.ts +7 -7
  17. package/dist/commonjs/snippets/curl.d.ts.map +1 -1
  18. package/dist/commonjs/snippets/curl.js +5 -5
  19. package/dist/commonjs/snippets/js.d.ts +9 -9
  20. package/dist/commonjs/snippets/js.d.ts.map +1 -1
  21. package/dist/commonjs/snippets/js.js +11 -9
  22. package/dist/commonjs/snippets/python.d.ts +6 -6
  23. package/dist/commonjs/snippets/python.d.ts.map +1 -1
  24. package/dist/commonjs/snippets/python.js +51 -7
  25. package/dist/commonjs/tasks/index.d.ts.map +1 -1
  26. package/dist/commonjs/tasks/index.js +2 -0
  27. package/dist/commonjs/tasks/text-generation/data.d.ts.map +1 -1
  28. package/dist/commonjs/tasks/text-generation/data.js +1 -3
  29. package/dist/esm/inference-providers.d.ts +5 -4
  30. package/dist/esm/inference-providers.d.ts.map +1 -1
  31. package/dist/esm/inference-providers.js +14 -5
  32. package/dist/esm/library-to-tasks.d.ts +1 -1
  33. package/dist/esm/library-to-tasks.js +1 -1
  34. package/dist/esm/model-libraries-snippets.d.ts +1 -0
  35. package/dist/esm/model-libraries-snippets.d.ts.map +1 -1
  36. package/dist/esm/model-libraries-snippets.js +19 -6
  37. package/dist/esm/model-libraries.d.ts +8 -1
  38. package/dist/esm/model-libraries.d.ts.map +1 -1
  39. package/dist/esm/model-libraries.js +7 -0
  40. package/dist/esm/pipelines.d.ts +7 -1
  41. package/dist/esm/pipelines.d.ts.map +1 -1
  42. package/dist/esm/pipelines.js +6 -0
  43. package/dist/esm/snippets/common.js +2 -2
  44. package/dist/esm/snippets/curl.d.ts +7 -7
  45. package/dist/esm/snippets/curl.d.ts.map +1 -1
  46. package/dist/esm/snippets/curl.js +5 -5
  47. package/dist/esm/snippets/js.d.ts +9 -9
  48. package/dist/esm/snippets/js.d.ts.map +1 -1
  49. package/dist/esm/snippets/js.js +11 -9
  50. package/dist/esm/snippets/python.d.ts +6 -6
  51. package/dist/esm/snippets/python.d.ts.map +1 -1
  52. package/dist/esm/snippets/python.js +52 -8
  53. package/dist/esm/tasks/index.d.ts.map +1 -1
  54. package/dist/esm/tasks/index.js +2 -0
  55. package/dist/esm/tasks/text-generation/data.d.ts.map +1 -1
  56. package/dist/esm/tasks/text-generation/data.js +1 -3
  57. package/package.json +1 -1
  58. package/src/inference-providers.ts +16 -7
  59. package/src/library-to-tasks.ts +1 -1
  60. package/src/model-libraries-snippets.ts +20 -6
  61. package/src/model-libraries.ts +7 -0
  62. package/src/pipelines.ts +6 -0
  63. package/src/snippets/common.ts +2 -2
  64. package/src/snippets/curl.ts +19 -19
  65. package/src/snippets/js.ts +22 -20
  66. package/src/snippets/python.ts +67 -16
  67. package/src/tasks/audio-classification/about.md +1 -1
  68. package/src/tasks/audio-to-audio/about.md +1 -1
  69. package/src/tasks/automatic-speech-recognition/about.md +1 -1
  70. package/src/tasks/feature-extraction/spec/input.json +11 -4
  71. package/src/tasks/image-text-to-text/about.md +1 -1
  72. package/src/tasks/index.ts +2 -0
  73. package/src/tasks/sentence-similarity/about.md +2 -2
  74. package/src/tasks/text-generation/data.ts +1 -3
  75. package/src/tasks/text-to-speech/about.md +1 -1
@@ -1,11 +1,11 @@
1
- import { openAIbaseUrl, type InferenceProvider } from "../inference-providers.js";
2
- import type { PipelineType } from "../pipelines.js";
1
+ import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
2
+ import type { PipelineType, WidgetType } from "../pipelines.js";
3
3
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4
4
  import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5
5
  import { getModelInputSnippet } from "./inputs.js";
6
6
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
7
7
 
8
- const HFJS_METHODS: Record<string, string> = {
8
+ const HFJS_METHODS: Partial<Record<WidgetType, string>> = {
9
9
  "text-classification": "textClassification",
10
10
  "token-classification": "tokenClassification",
11
11
  "table-question-answering": "tableQuestionAnswering",
@@ -22,7 +22,7 @@ const HFJS_METHODS: Record<string, string> = {
22
22
  export const snippetBasic = (
23
23
  model: ModelDataMinimal,
24
24
  accessToken: string,
25
- provider: InferenceProvider
25
+ provider: SnippetInferenceProvider
26
26
  ): InferenceSnippet[] => {
27
27
  return [
28
28
  ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
@@ -40,7 +40,7 @@ const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
40
40
  provider: "${provider}",
41
41
  });
42
42
 
43
- console.log(output)
43
+ console.log(output);
44
44
  `,
45
45
  },
46
46
  ]
@@ -50,7 +50,7 @@ console.log(output)
50
50
  content: `\
51
51
  async function query(data) {
52
52
  const response = await fetch(
53
- "https://api-inference.huggingface.co/models/${model.id}",
53
+ "https://router.huggingface.co/hf-inference/models/${model.id}",
54
54
  {
55
55
  headers: {
56
56
  Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
@@ -74,7 +74,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
74
74
  export const snippetTextGeneration = (
75
75
  model: ModelDataMinimal,
76
76
  accessToken: string,
77
- provider: InferenceProvider,
77
+ provider: SnippetInferenceProvider,
78
78
  opts?: {
79
79
  streaming?: boolean;
80
80
  messages?: ChatCompletionInputMessage[];
@@ -139,7 +139,7 @@ let out = "";
139
139
  const stream = await client.chat.completions.create({
140
140
  model: "${model.id}",
141
141
  messages: ${messagesStr},
142
- ${configStr},
142
+ ${configStr}
143
143
  stream: true,
144
144
  });
145
145
 
@@ -167,7 +167,8 @@ const chatCompletion = await client.chatCompletion({
167
167
  ${configStr}
168
168
  });
169
169
 
170
- console.log(chatCompletion.choices[0].message);`,
170
+ console.log(chatCompletion.choices[0].message);
171
+ `,
171
172
  },
172
173
  {
173
174
  client: "openai",
@@ -184,7 +185,8 @@ const chatCompletion = await client.chat.completions.create({
184
185
  ${configStr}
185
186
  });
186
187
 
187
- console.log(chatCompletion.choices[0].message);`,
188
+ console.log(chatCompletion.choices[0].message);
189
+ `,
188
190
  },
189
191
  ];
190
192
  }
@@ -199,7 +201,7 @@ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessTok
199
201
  client: "fetch",
200
202
  content: `async function query(data) {
201
203
  const response = await fetch(
202
- "https://api-inference.huggingface.co/models/${model.id}",
204
+ "https://router.huggingface.co/hf-inference/models/${model.id}",
203
205
  {
204
206
  headers: {
205
207
  Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
@@ -225,7 +227,7 @@ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessTok
225
227
  export const snippetTextToImage = (
226
228
  model: ModelDataMinimal,
227
229
  accessToken: string,
228
- provider: InferenceProvider
230
+ provider: SnippetInferenceProvider
229
231
  ): InferenceSnippet[] => {
230
232
  return [
231
233
  {
@@ -250,7 +252,7 @@ const image = await client.textToImage({
250
252
  client: "fetch",
251
253
  content: `async function query(data) {
252
254
  const response = await fetch(
253
- "https://api-inference.huggingface.co/models/${model.id}",
255
+ "https://router.huggingface.co/hf-inference/models/${model.id}",
254
256
  {
255
257
  headers: {
256
258
  Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
@@ -275,14 +277,14 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
275
277
  export const snippetTextToAudio = (
276
278
  model: ModelDataMinimal,
277
279
  accessToken: string,
278
- provider: InferenceProvider
280
+ provider: SnippetInferenceProvider
279
281
  ): InferenceSnippet[] => {
280
282
  if (provider !== "hf-inference") {
281
283
  return [];
282
284
  }
283
285
  const commonSnippet = `async function query(data) {
284
286
  const response = await fetch(
285
- "https://api-inference.huggingface.co/models/${model.id}",
287
+ "https://router.huggingface.co/hf-inference/models/${model.id}",
286
288
  {
287
289
  headers: {
288
290
  Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
@@ -329,7 +331,7 @@ export const snippetTextToAudio = (
329
331
  export const snippetAutomaticSpeechRecognition = (
330
332
  model: ModelDataMinimal,
331
333
  accessToken: string,
332
- provider: InferenceProvider
334
+ provider: SnippetInferenceProvider
333
335
  ): InferenceSnippet[] => {
334
336
  return [
335
337
  {
@@ -357,7 +359,7 @@ console.log(output);
357
359
  export const snippetFile = (
358
360
  model: ModelDataMinimal,
359
361
  accessToken: string,
360
- provider: InferenceProvider
362
+ provider: SnippetInferenceProvider
361
363
  ): InferenceSnippet[] => {
362
364
  if (provider !== "hf-inference") {
363
365
  return [];
@@ -368,7 +370,7 @@ export const snippetFile = (
368
370
  content: `async function query(filename) {
369
371
  const data = fs.readFileSync(filename);
370
372
  const response = await fetch(
371
- "https://api-inference.huggingface.co/models/${model.id}",
373
+ "https://router.huggingface.co/hf-inference/models/${model.id}",
372
374
  {
373
375
  headers: {
374
376
  Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
@@ -395,7 +397,7 @@ export const jsSnippets: Partial<
395
397
  (
396
398
  model: ModelDataMinimal,
397
399
  accessToken: string,
398
- provider: InferenceProvider,
400
+ provider: SnippetInferenceProvider,
399
401
  opts?: Record<string, unknown>
400
402
  ) => InferenceSnippet[]
401
403
  >
@@ -429,7 +431,7 @@ export const jsSnippets: Partial<
429
431
  export function getJsInferenceSnippet(
430
432
  model: ModelDataMinimal,
431
433
  accessToken: string,
432
- provider: InferenceProvider,
434
+ provider: SnippetInferenceProvider,
433
435
  opts?: Record<string, unknown>
434
436
  ): InferenceSnippet[] {
435
437
  return model.pipeline_tag && model.pipeline_tag in jsSnippets
@@ -1,11 +1,45 @@
1
- import { HF_HUB_INFERENCE_PROXY_TEMPLATE, openAIbaseUrl, type InferenceProvider } from "../inference-providers.js";
2
- import type { PipelineType } from "../pipelines.js";
1
+ import {
2
+ HF_HUB_INFERENCE_PROXY_TEMPLATE,
3
+ openAIbaseUrl,
4
+ type SnippetInferenceProvider,
5
+ } from "../inference-providers.js";
6
+ import type { PipelineType, WidgetType } from "../pipelines.js";
3
7
  import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4
8
  import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5
9
  import { getModelInputSnippet } from "./inputs.js";
6
10
  import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
7
11
 
8
- const snippetImportInferenceClient = (accessToken: string, provider: InferenceProvider): string =>
12
+ const HFH_INFERENCE_CLIENT_METHODS: Partial<Record<WidgetType, string>> = {
13
+ "audio-classification": "audio_classification",
14
+ "audio-to-audio": "audio_to_audio",
15
+ "automatic-speech-recognition": "automatic_speech_recognition",
16
+ "text-to-speech": "text_to_speech",
17
+ "image-classification": "image_classification",
18
+ "image-segmentation": "image_segmentation",
19
+ "image-to-image": "image_to_image",
20
+ "image-to-text": "image_to_text",
21
+ "object-detection": "object_detection",
22
+ "text-to-image": "text_to_image",
23
+ "text-to-video": "text_to_video",
24
+ "zero-shot-image-classification": "zero_shot_image_classification",
25
+ "document-question-answering": "document_question_answering",
26
+ "visual-question-answering": "visual_question_answering",
27
+ "feature-extraction": "feature_extraction",
28
+ "fill-mask": "fill_mask",
29
+ "question-answering": "question_answering",
30
+ "sentence-similarity": "sentence_similarity",
31
+ summarization: "summarization",
32
+ "table-question-answering": "table_question_answering",
33
+ "text-classification": "text_classification",
34
+ "text-generation": "text_generation",
35
+ "token-classification": "token_classification",
36
+ translation: "translation",
37
+ "zero-shot-classification": "zero_shot_classification",
38
+ "tabular-classification": "tabular_classification",
39
+ "tabular-regression": "tabular_regression",
40
+ };
41
+
42
+ const snippetImportInferenceClient = (accessToken: string, provider: SnippetInferenceProvider): string =>
9
43
  `\
10
44
  from huggingface_hub import InferenceClient
11
45
 
@@ -17,7 +51,7 @@ client = InferenceClient(
17
51
  export const snippetConversational = (
18
52
  model: ModelDataMinimal,
19
53
  accessToken: string,
20
- provider: InferenceProvider,
54
+ provider: SnippetInferenceProvider,
21
55
  opts?: {
22
56
  streaming?: boolean;
23
57
  messages?: ChatCompletionInputMessage[];
@@ -53,7 +87,7 @@ messages = ${messagesStr}
53
87
  stream = client.chat.completions.create(
54
88
  model="${model.id}",
55
89
  messages=messages,
56
- ${configStr},
90
+ ${configStr}
57
91
  stream=True
58
92
  )
59
93
 
@@ -75,7 +109,7 @@ messages = ${messagesStr}
75
109
  stream = client.chat.completions.create(
76
110
  model="${model.id}",
77
111
  messages=messages,
78
- ${configStr},
112
+ ${configStr}
79
113
  stream=True
80
114
  )
81
115
 
@@ -164,8 +198,30 @@ output = query({
164
198
  ];
165
199
  };
166
200
 
167
- export const snippetBasic = (model: ModelDataMinimal): InferenceSnippet[] => {
201
+ export const snippetBasic = (
202
+ model: ModelDataMinimal,
203
+ accessToken: string,
204
+ provider: SnippetInferenceProvider
205
+ ): InferenceSnippet[] => {
168
206
  return [
207
+ ...(model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS
208
+ ? [
209
+ {
210
+ client: "huggingface_hub",
211
+ content: `\
212
+ ${snippetImportInferenceClient(accessToken, provider)}
213
+
214
+ result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
215
+ model="${model.id}",
216
+ inputs=${getModelInputSnippet(model)},
217
+ provider="${provider}",
218
+ )
219
+
220
+ print(result)
221
+ `,
222
+ },
223
+ ]
224
+ : []),
169
225
  {
170
226
  client: "requests",
171
227
  content: `\
@@ -199,7 +255,7 @@ output = query(${getModelInputSnippet(model)})`,
199
255
  export const snippetTextToImage = (
200
256
  model: ModelDataMinimal,
201
257
  accessToken: string,
202
- provider: InferenceProvider
258
+ provider: SnippetInferenceProvider
203
259
  ): InferenceSnippet[] => {
204
260
  return [
205
261
  {
@@ -337,7 +393,7 @@ export const pythonSnippets: Partial<
337
393
  (
338
394
  model: ModelDataMinimal,
339
395
  accessToken: string,
340
- provider: InferenceProvider,
396
+ provider: SnippetInferenceProvider,
341
397
  opts?: Record<string, unknown>
342
398
  ) => InferenceSnippet[]
343
399
  >
@@ -375,7 +431,7 @@ export const pythonSnippets: Partial<
375
431
  export function getPythonInferenceSnippet(
376
432
  model: ModelDataMinimal,
377
433
  accessToken: string,
378
- provider: InferenceProvider,
434
+ provider: SnippetInferenceProvider,
379
435
  opts?: Record<string, unknown>
380
436
  ): InferenceSnippet[] {
381
437
  if (model.tags.includes("conversational")) {
@@ -387,11 +443,6 @@ export function getPythonInferenceSnippet(
387
443
  ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider) ?? []
388
444
  : [];
389
445
 
390
- const baseUrl =
391
- provider === "hf-inference"
392
- ? `https://api-inference.huggingface.co/models/${model.id}`
393
- : HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
394
-
395
446
  return snippets.map((snippet) => {
396
447
  return {
397
448
  ...snippet,
@@ -400,7 +451,7 @@ export function getPythonInferenceSnippet(
400
451
  ? `\
401
452
  import requests
402
453
 
403
- API_URL = "${baseUrl}"
454
+ API_URL = "${openAIbaseUrl(provider)}"
404
455
  headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
405
456
 
406
457
  ${snippet.content}`
@@ -33,7 +33,7 @@ import json
33
33
  import requests
34
34
 
35
35
  headers = {"Authorization": f"Bearer {API_TOKEN}"}
36
- API_URL = "https://api-inference.huggingface.co/models/superb/hubert-large-superb-er"
36
+ API_URL = "https://router.huggingface.co/hf-inference/models/superb/hubert-large-superb-er"
37
37
 
38
38
  def query(filename):
39
39
  with open(filename, "rb") as f:
@@ -19,7 +19,7 @@ import json
19
19
  import requests
20
20
 
21
21
  headers = {"Authorization": f"Bearer {API_TOKEN}"}
22
- API_URL = "https://api-inference.huggingface.co/models/speechbrain/mtl-mimic-voicebank"
22
+ API_URL = "https://router.huggingface.co/hf-inference/models/speechbrain/mtl-mimic-voicebank"
23
23
 
24
24
  def query(filename):
25
25
  with open(filename, "rb") as f:
@@ -25,7 +25,7 @@ import json
25
25
  import requests
26
26
 
27
27
  headers = {"Authorization": f"Bearer {API_TOKEN}"}
28
- API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
28
+ API_URL = "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3"
29
29
 
30
30
  def query(filename):
31
31
  with open(filename, "rb") as f:
@@ -8,11 +8,18 @@
8
8
  "properties": {
9
9
  "inputs": {
10
10
  "title": "FeatureExtractionInputs",
11
+ "description": "The text or list of texts to embed.",
11
12
  "oneOf": [
12
- { "type": "string" },
13
- { "type": "array", "items": { "type": "string" } }
14
- ],
15
- "description": "The text or list of texts to embed."
13
+ {
14
+ "type": "string"
15
+ },
16
+ {
17
+ "type": "array",
18
+ "items": {
19
+ "type": "string"
20
+ }
21
+ }
22
+ ]
16
23
  },
17
24
  "normalize": {
18
25
  "type": "boolean",
@@ -76,7 +76,7 @@ outputs[0]["generated_text"]
76
76
  You can also use the Inference API to test image-text-to-text models. You need to use a [Hugging Face token](https://huggingface.co/settings/tokens) for authentication.
77
77
 
78
78
  ```bash
79
- curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct \
79
+ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct \
80
80
  -X POST \
81
81
  -d '{"messages": [{"role": "user","content": [{"type": "image"}, {"type": "text", "text": "Can you describe the image?"}]}]}' \
82
82
  -H "Content-Type: application/json" \
@@ -174,6 +174,7 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
174
174
  "text-to-3d": ["diffusers"],
175
175
  "image-to-3d": ["diffusers"],
176
176
  "any-to-any": ["transformers"],
177
+ "visual-document-retrieval": ["transformers"],
177
178
  };
178
179
 
179
180
  /**
@@ -202,6 +203,7 @@ export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
202
203
  "automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition),
203
204
  "depth-estimation": getData("depth-estimation", depthEstimation),
204
205
  "document-question-answering": getData("document-question-answering", documentQuestionAnswering),
206
+ "visual-document-retrieval": getData("visual-document-retrieval", placeholder),
205
207
  "feature-extraction": getData("feature-extraction", featureExtraction),
206
208
  "fill-mask": getData("fill-mask", fillMask),
207
209
  "graph-ml": undefined,
@@ -22,7 +22,7 @@ You can infer with Passage Ranking models using [Inference Endpoints](https://hu
22
22
  import json
23
23
  import requests
24
24
 
25
- API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
25
+ API_URL = "https://router.huggingface.co/hf-inference/models/sentence-transformers/msmarco-distilbert-base-tas-b"
26
26
  headers = {"Authorization": f"Bearer {api_token}"}
27
27
 
28
28
  def query(payload):
@@ -51,7 +51,7 @@ Semantic Textual Similarity is the task of evaluating how similar two texts are
51
51
  import json
52
52
  import requests
53
53
 
54
- API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
54
+ API_URL = "https://router.huggingface.co/hf-inference/models/sentence-transformers/all-MiniLM-L6-v2"
55
55
  headers = {"Authorization": f"Bearer {api_token}"}
56
56
 
57
57
  def query(payload):
@@ -61,9 +61,7 @@ const taskData: TaskDataCustom = {
61
61
  },
62
62
  ],
63
63
  models: [
64
- { description: "A text-generation model trained to follow instructions.",
65
- id: "google/gemma-2-2b-it",
66
- },
64
+ { description: "A text-generation model trained to follow instructions.", id: "google/gemma-2-2b-it" },
67
65
  {
68
66
  description: "Smaller variant of one of the most powerful models.",
69
67
  id: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
@@ -19,7 +19,7 @@ import json
19
19
  import requests
20
20
 
21
21
  headers = {"Authorization": f"Bearer {API_TOKEN}"}
22
- API_URL = "https://api-inference.huggingface.co/models/microsoft/speecht5_tts"
22
+ API_URL = "https://router.huggingface.co/hf-inference/models/microsoft/speecht5_tts"
23
23
 
24
24
  def query(payload):
25
25
  response = requests.post(API_URL, headers=headers, json=payload)