@huggingface/inference 4.13.10 → 4.13.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/README.md +2 -0
  2. package/dist/commonjs/lib/getProviderHelper.d.ts.map +1 -1
  3. package/dist/commonjs/lib/getProviderHelper.js +4 -0
  4. package/dist/commonjs/package.d.ts +1 -1
  5. package/dist/commonjs/package.js +1 -1
  6. package/dist/commonjs/providers/consts.d.ts.map +1 -1
  7. package/dist/commonjs/providers/consts.js +1 -0
  8. package/dist/commonjs/providers/nvidia.d.ts +21 -0
  9. package/dist/commonjs/providers/nvidia.d.ts.map +1 -0
  10. package/dist/commonjs/providers/nvidia.js +26 -0
  11. package/dist/commonjs/providers/replicate.d.ts.map +1 -1
  12. package/dist/commonjs/providers/replicate.js +6 -1
  13. package/dist/commonjs/snippets/getInferenceSnippets.js +11 -20
  14. package/dist/commonjs/types.d.ts +2 -2
  15. package/dist/commonjs/types.d.ts.map +1 -1
  16. package/dist/commonjs/types.js +2 -0
  17. package/dist/esm/lib/getProviderHelper.d.ts.map +1 -1
  18. package/dist/esm/lib/getProviderHelper.js +4 -0
  19. package/dist/esm/package.d.ts +1 -1
  20. package/dist/esm/package.js +1 -1
  21. package/dist/esm/providers/consts.d.ts.map +1 -1
  22. package/dist/esm/providers/consts.js +1 -0
  23. package/dist/esm/providers/nvidia.d.ts +21 -0
  24. package/dist/esm/providers/nvidia.d.ts.map +1 -0
  25. package/dist/esm/providers/nvidia.js +22 -0
  26. package/dist/esm/providers/replicate.d.ts.map +1 -1
  27. package/dist/esm/providers/replicate.js +6 -1
  28. package/dist/esm/snippets/getInferenceSnippets.js +11 -20
  29. package/dist/esm/types.d.ts +2 -2
  30. package/dist/esm/types.d.ts.map +1 -1
  31. package/dist/esm/types.js +2 -0
  32. package/package.json +34 -34
  33. package/src/InferenceClient.ts +2 -2
  34. package/src/errors.ts +1 -1
  35. package/src/lib/getDefaultTask.ts +1 -1
  36. package/src/lib/getInferenceProviderMapping.ts +11 -11
  37. package/src/lib/getProviderHelper.ts +41 -37
  38. package/src/lib/makeRequestOptions.ts +11 -11
  39. package/src/package.ts +1 -1
  40. package/src/providers/black-forest-labs.ts +3 -3
  41. package/src/providers/consts.ts +1 -0
  42. package/src/providers/fal-ai.ts +33 -33
  43. package/src/providers/featherless-ai.ts +1 -1
  44. package/src/providers/hf-inference.ts +48 -48
  45. package/src/providers/hyperbolic.ts +3 -3
  46. package/src/providers/nebius.ts +1 -1
  47. package/src/providers/novita.ts +7 -7
  48. package/src/providers/nscale.ts +2 -2
  49. package/src/providers/nvidia.ts +23 -0
  50. package/src/providers/ovhcloud.ts +1 -1
  51. package/src/providers/providerHelper.ts +7 -7
  52. package/src/providers/replicate.ts +8 -3
  53. package/src/providers/sambanova.ts +1 -1
  54. package/src/providers/together.ts +1 -1
  55. package/src/providers/wavespeed.ts +10 -10
  56. package/src/providers/zai-org.ts +7 -7
  57. package/src/snippets/getInferenceSnippets.ts +26 -26
  58. package/src/tasks/audio/audioClassification.ts +1 -1
  59. package/src/tasks/audio/automaticSpeechRecognition.ts +1 -1
  60. package/src/tasks/audio/utils.ts +1 -1
  61. package/src/tasks/custom/request.ts +2 -2
  62. package/src/tasks/custom/streamingRequest.ts +2 -2
  63. package/src/tasks/cv/imageClassification.ts +1 -1
  64. package/src/tasks/cv/imageSegmentation.ts +1 -1
  65. package/src/tasks/cv/textToImage.ts +5 -5
  66. package/src/tasks/cv/textToVideo.ts +1 -1
  67. package/src/tasks/cv/zeroShotImageClassification.ts +3 -3
  68. package/src/tasks/multimodal/documentQuestionAnswering.ts +2 -2
  69. package/src/tasks/multimodal/visualQuestionAnswering.ts +1 -1
  70. package/src/tasks/nlp/chatCompletion.ts +1 -1
  71. package/src/tasks/nlp/chatCompletionStream.ts +1 -1
  72. package/src/tasks/nlp/featureExtraction.ts +1 -1
  73. package/src/tasks/nlp/questionAnswering.ts +2 -2
  74. package/src/tasks/nlp/sentenceSimilarity.ts +1 -1
  75. package/src/tasks/nlp/tableQuestionAnswering.ts +2 -2
  76. package/src/tasks/nlp/textClassification.ts +1 -1
  77. package/src/tasks/nlp/textGeneration.ts +1 -1
  78. package/src/tasks/nlp/textGenerationStream.ts +1 -1
  79. package/src/tasks/nlp/tokenClassification.ts +2 -2
  80. package/src/tasks/nlp/zeroShotClassification.ts +2 -2
  81. package/src/tasks/tabular/tabularClassification.ts +1 -1
  82. package/src/tasks/tabular/tabularRegression.ts +1 -1
  83. package/src/types.ts +2 -0
  84. package/src/utils/pick.ts +1 -1
  85. package/src/utils/request.ts +20 -20
  86. package/src/utils/typedEntries.ts +1 -1
package/package.json CHANGED
@@ -1,54 +1,33 @@
1
1
  {
2
2
  "name": "@huggingface/inference",
3
- "version": "4.13.10",
4
- "license": "MIT",
5
- "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
3
+ "version": "4.13.12",
6
4
  "description": "Typescript client for the Hugging Face Inference Providers and Inference Endpoints",
7
- "repository": {
8
- "type": "git",
9
- "url": "https://github.com/huggingface/huggingface.js.git"
10
- },
11
- "publishConfig": {
12
- "access": "public"
13
- },
14
5
  "keywords": [
6
+ "ai",
15
7
  "hugging face",
16
8
  "hugging face typescript",
17
9
  "huggingface",
18
10
  "huggingface-inference-api",
19
11
  "huggingface-inference-api-typescript",
20
- "inference",
21
- "ai"
12
+ "inference"
22
13
  ],
23
- "engines": {
24
- "node": ">=18"
14
+ "license": "MIT",
15
+ "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
16
+ "repository": {
17
+ "type": "git",
18
+ "url": "https://github.com/huggingface/huggingface.js.git"
25
19
  },
20
+ "source": "src/index.ts",
26
21
  "files": [
27
22
  "dist",
28
23
  "src",
29
24
  "!src/snippets/templates/**/*.jinja"
30
25
  ],
31
- "source": "src/index.ts",
32
- "types": "./dist/commonjs/index.d.ts",
26
+ "type": "module",
33
27
  "main": "./dist/commonjs/index.js",
34
28
  "module": "./dist/esm/index.js",
35
- "tshy": {
36
- "exports": {
37
- "./package.json": "./package.json",
38
- ".": "./src/index.ts"
39
- }
40
- },
41
- "type": "module",
42
- "dependencies": {
43
- "@huggingface/tasks": "^0.19.78",
44
- "@huggingface/jinja": "^0.5.3"
45
- },
46
- "devDependencies": {
47
- "@types/node": "18.13.0"
48
- },
49
- "resolutions": {},
29
+ "types": "./dist/commonjs/index.d.ts",
50
30
  "exports": {
51
- "./package.json": "./package.json",
52
31
  ".": {
53
32
  "import": {
54
33
  "types": "./dist/esm/index.d.ts",
@@ -58,14 +37,35 @@
58
37
  "types": "./dist/commonjs/index.d.ts",
59
38
  "default": "./dist/commonjs/index.js"
60
39
  }
40
+ },
41
+ "./package.json": "./package.json"
42
+ },
43
+ "publishConfig": {
44
+ "access": "public"
45
+ },
46
+ "dependencies": {
47
+ "@huggingface/tasks": "^0.19.83",
48
+ "@huggingface/jinja": "^0.5.5"
49
+ },
50
+ "devDependencies": {
51
+ "@types/node": "18.13.0"
52
+ },
53
+ "resolutions": {},
54
+ "tshy": {
55
+ "exports": {
56
+ ".": "./src/index.ts",
57
+ "./package.json": "./package.json"
61
58
  }
62
59
  },
60
+ "engines": {
61
+ "node": ">=18"
62
+ },
63
63
  "scripts": {
64
64
  "build": "pnpm run export-templates && pnpm run package-to-ts && tshy",
65
65
  "lint": "eslint --quiet --fix --ext .cjs,.ts .",
66
66
  "lint:check": "eslint --ext .cjs,.ts .",
67
- "format": "prettier --write .",
68
- "format:check": "prettier --check .",
67
+ "format": "oxfmt .",
68
+ "format:check": "oxfmt --check .",
69
69
  "test": "vitest run --config vitest.config.mts",
70
70
  "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts",
71
71
  "check": "tsc",
@@ -16,7 +16,7 @@ export class InferenceClient {
16
16
  accessToken = "",
17
17
  defaultOptions: Options & {
18
18
  endpointUrl?: string;
19
- } = {}
19
+ } = {},
20
20
  ) {
21
21
  this.accessToken = accessToken;
22
22
  this.defaultOptions = defaultOptions;
@@ -32,7 +32,7 @@ export class InferenceClient {
32
32
  {
33
33
  ...omit(defaultOptions, ["endpointUrl"]),
34
34
  ...options,
35
- }
35
+ },
36
36
  ),
37
37
  });
38
38
  }
package/src/errors.ts CHANGED
@@ -51,7 +51,7 @@ abstract class InferenceClientHttpRequestError extends InferenceClientError {
51
51
  ...("Authorization" in httpRequest.headers ? { Authorization: `Bearer [redacted]` } : undefined),
52
52
  /// redact authentication in the request headers
53
53
  },
54
- }
54
+ }
55
55
  : undefined),
56
56
  };
57
57
  this.httpResponse = httpResponse;
@@ -23,7 +23,7 @@ export interface DefaultTaskOptions {
23
23
  export async function getDefaultTask(
24
24
  model: string,
25
25
  accessToken: string | undefined,
26
- options?: DefaultTaskOptions
26
+ options?: DefaultTaskOptions,
27
27
  ): Promise<string | null> {
28
28
  if (isUrl(model)) {
29
29
  return null;
@@ -28,7 +28,7 @@ function normalizeInferenceProviderMapping(
28
28
  adapter?: string;
29
29
  adapterWeightsPath?: string;
30
30
  }
31
- >
31
+ >,
32
32
  ): InferenceProviderMappingEntry[] {
33
33
  if (!inferenceProviderMapping) {
34
34
  return [];
@@ -56,7 +56,7 @@ export async function fetchInferenceProviderMappingForModel(
56
56
  accessToken?: string,
57
57
  options?: {
58
58
  fetch?: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
59
- }
59
+ },
60
60
  ): Promise<InferenceProviderMappingEntry[]> {
61
61
  let inferenceProviderMapping: InferenceProviderMappingEntry[] | null;
62
62
  if (inferenceProviderMappingCache.has(modelId)) {
@@ -74,14 +74,14 @@ export async function fetchInferenceProviderMappingForModel(
74
74
  throw new InferenceClientHubApiError(
75
75
  `Failed to fetch inference provider mapping for model ${modelId}: ${error.error}`,
76
76
  { url, method: "GET" },
77
- { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: error }
77
+ { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: error },
78
78
  );
79
79
  }
80
80
  } else {
81
81
  throw new InferenceClientHubApiError(
82
82
  `Failed to fetch inference provider mapping for model ${modelId}`,
83
83
  { url, method: "GET" },
84
- { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
84
+ { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() },
85
85
  );
86
86
  }
87
87
  }
@@ -96,14 +96,14 @@ export async function fetchInferenceProviderMappingForModel(
96
96
  throw new InferenceClientHubApiError(
97
97
  `Failed to fetch inference provider mapping for model ${modelId}: malformed API response, invalid JSON`,
98
98
  { url, method: "GET" },
99
- { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
99
+ { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() },
100
100
  );
101
101
  }
102
102
  if (!payload?.inferenceProviderMapping) {
103
103
  throw new InferenceClientHubApiError(
104
104
  `We have not been able to find inference provider information for model ${modelId}.`,
105
105
  { url, method: "GET" },
106
- { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
106
+ { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() },
107
107
  );
108
108
  }
109
109
  inferenceProviderMapping = normalizeInferenceProviderMapping(modelId, payload.inferenceProviderMapping);
@@ -121,7 +121,7 @@ export async function getInferenceProviderMapping(
121
121
  },
122
122
  options: {
123
123
  fetch?: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
124
- }
124
+ },
125
125
  ): Promise<InferenceProviderMappingEntry | null> {
126
126
  const logger = getLogger();
127
127
  if (params.provider === ("auto" as InferenceProvider) && params.task === "conversational") {
@@ -147,12 +147,12 @@ export async function getInferenceProviderMapping(
147
147
  : [params.task];
148
148
  if (!typedInclude(equivalentTasks, providerMapping.task)) {
149
149
  throw new InferenceClientInputError(
150
- `Model ${params.modelId} is not supported for task ${params.task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
150
+ `Model ${params.modelId} is not supported for task ${params.task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`,
151
151
  );
152
152
  }
153
153
  if (providerMapping.status === "staging") {
154
154
  logger.warn(
155
- `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
155
+ `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`,
156
156
  );
157
157
  }
158
158
  return providerMapping;
@@ -163,7 +163,7 @@ export async function getInferenceProviderMapping(
163
163
  export async function resolveProvider(
164
164
  provider?: InferenceProviderOrPolicy,
165
165
  modelId?: string,
166
- endpointUrl?: string
166
+ endpointUrl?: string,
167
167
  ): Promise<InferenceProvider> {
168
168
  const logger = getLogger();
169
169
  if (endpointUrl) {
@@ -175,7 +175,7 @@ export async function resolveProvider(
175
175
  }
176
176
  if (!provider) {
177
177
  logger.log(
178
- "Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers."
178
+ "Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.",
179
179
  );
180
180
  provider = "auto";
181
181
  }
@@ -12,6 +12,7 @@ import * as Hyperbolic from "../providers/hyperbolic.js";
12
12
  import * as Nebius from "../providers/nebius.js";
13
13
  import * as Novita from "../providers/novita.js";
14
14
  import * as Nscale from "../providers/nscale.js";
15
+ import * as Nvidia from "../providers/nvidia.js";
15
16
  import * as OpenAI from "../providers/openai.js";
16
17
  import * as OvhCloud from "../providers/ovhcloud.js";
17
18
  import * as PublicAI from "../providers/publicai.js";
@@ -147,6 +148,9 @@ export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask,
147
148
  "text-to-image": new Nscale.NscaleTextToImageTask(),
148
149
  conversational: new Nscale.NscaleConversationalTask(),
149
150
  },
151
+ nvidia: {
152
+ conversational: new Nvidia.NvidiaConversationalTask(),
153
+ },
150
154
  openai: {
151
155
  conversational: new OpenAI.OpenAIConversationalTask(),
152
156
  },
@@ -197,157 +201,157 @@ export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask,
197
201
  */
198
202
  export function getProviderHelper(
199
203
  provider: InferenceProviderOrPolicy,
200
- task: "text-to-image"
204
+ task: "text-to-image",
201
205
  ): TextToImageTaskHelper & TaskProviderHelper;
202
206
  export function getProviderHelper(
203
207
  provider: InferenceProviderOrPolicy,
204
- task: "conversational"
208
+ task: "conversational",
205
209
  ): ConversationalTaskHelper & TaskProviderHelper;
206
210
  export function getProviderHelper(
207
211
  provider: InferenceProviderOrPolicy,
208
- task: "text-generation"
212
+ task: "text-generation",
209
213
  ): TextGenerationTaskHelper & TaskProviderHelper;
210
214
  export function getProviderHelper(
211
215
  provider: InferenceProviderOrPolicy,
212
- task: "text-to-speech"
216
+ task: "text-to-speech",
213
217
  ): TextToSpeechTaskHelper & TaskProviderHelper;
214
218
  export function getProviderHelper(
215
219
  provider: InferenceProviderOrPolicy,
216
- task: "text-to-audio"
220
+ task: "text-to-audio",
217
221
  ): TextToAudioTaskHelper & TaskProviderHelper;
218
222
  export function getProviderHelper(
219
223
  provider: InferenceProviderOrPolicy,
220
- task: "automatic-speech-recognition"
224
+ task: "automatic-speech-recognition",
221
225
  ): AutomaticSpeechRecognitionTaskHelper & TaskProviderHelper;
222
226
  export function getProviderHelper(
223
227
  provider: InferenceProviderOrPolicy,
224
- task: "text-to-video"
228
+ task: "text-to-video",
225
229
  ): TextToVideoTaskHelper & TaskProviderHelper;
226
230
  export function getProviderHelper(
227
231
  provider: InferenceProviderOrPolicy,
228
- task: "text-classification"
232
+ task: "text-classification",
229
233
  ): TextClassificationTaskHelper & TaskProviderHelper;
230
234
  export function getProviderHelper(
231
235
  provider: InferenceProviderOrPolicy,
232
- task: "question-answering"
236
+ task: "question-answering",
233
237
  ): QuestionAnsweringTaskHelper & TaskProviderHelper;
234
238
  export function getProviderHelper(
235
239
  provider: InferenceProviderOrPolicy,
236
- task: "audio-classification"
240
+ task: "audio-classification",
237
241
  ): AudioClassificationTaskHelper & TaskProviderHelper;
238
242
  export function getProviderHelper(
239
243
  provider: InferenceProviderOrPolicy,
240
- task: "audio-to-audio"
244
+ task: "audio-to-audio",
241
245
  ): AudioToAudioTaskHelper & TaskProviderHelper;
242
246
  export function getProviderHelper(
243
247
  provider: InferenceProviderOrPolicy,
244
- task: "fill-mask"
248
+ task: "fill-mask",
245
249
  ): FillMaskTaskHelper & TaskProviderHelper;
246
250
  export function getProviderHelper(
247
251
  provider: InferenceProviderOrPolicy,
248
- task: "feature-extraction"
252
+ task: "feature-extraction",
249
253
  ): FeatureExtractionTaskHelper & TaskProviderHelper;
250
254
  export function getProviderHelper(
251
255
  provider: InferenceProviderOrPolicy,
252
- task: "image-classification"
256
+ task: "image-classification",
253
257
  ): ImageClassificationTaskHelper & TaskProviderHelper;
254
258
  export function getProviderHelper(
255
259
  provider: InferenceProviderOrPolicy,
256
- task: "image-segmentation"
260
+ task: "image-segmentation",
257
261
  ): ImageSegmentationTaskHelper & TaskProviderHelper;
258
262
  export function getProviderHelper(
259
263
  provider: InferenceProviderOrPolicy,
260
- task: "document-question-answering"
264
+ task: "document-question-answering",
261
265
  ): DocumentQuestionAnsweringTaskHelper & TaskProviderHelper;
262
266
  export function getProviderHelper(
263
267
  provider: InferenceProviderOrPolicy,
264
- task: "image-to-text"
268
+ task: "image-to-text",
265
269
  ): ImageToTextTaskHelper & TaskProviderHelper;
266
270
  export function getProviderHelper(
267
271
  provider: InferenceProviderOrPolicy,
268
- task: "object-detection"
272
+ task: "object-detection",
269
273
  ): ObjectDetectionTaskHelper & TaskProviderHelper;
270
274
  export function getProviderHelper(
271
275
  provider: InferenceProviderOrPolicy,
272
- task: "zero-shot-image-classification"
276
+ task: "zero-shot-image-classification",
273
277
  ): ZeroShotImageClassificationTaskHelper & TaskProviderHelper;
274
278
  export function getProviderHelper(
275
279
  provider: InferenceProviderOrPolicy,
276
- task: "zero-shot-classification"
280
+ task: "zero-shot-classification",
277
281
  ): ZeroShotClassificationTaskHelper & TaskProviderHelper;
278
282
  export function getProviderHelper(
279
283
  provider: InferenceProviderOrPolicy,
280
- task: "image-to-image"
284
+ task: "image-to-image",
281
285
  ): ImageToImageTaskHelper & TaskProviderHelper;
282
286
  export function getProviderHelper(
283
287
  provider: InferenceProviderOrPolicy,
284
- task: "image-to-video"
288
+ task: "image-to-video",
285
289
  ): ImageToVideoTaskHelper & TaskProviderHelper;
286
290
  export function getProviderHelper(
287
291
  provider: InferenceProviderOrPolicy,
288
- task: "image-text-to-image"
292
+ task: "image-text-to-image",
289
293
  ): ImageTextToImageTaskHelper & TaskProviderHelper;
290
294
  export function getProviderHelper(
291
295
  provider: InferenceProviderOrPolicy,
292
- task: "image-text-to-video"
296
+ task: "image-text-to-video",
293
297
  ): ImageTextToVideoTaskHelper & TaskProviderHelper;
294
298
  export function getProviderHelper(
295
299
  provider: InferenceProviderOrPolicy,
296
- task: "sentence-similarity"
300
+ task: "sentence-similarity",
297
301
  ): SentenceSimilarityTaskHelper & TaskProviderHelper;
298
302
  export function getProviderHelper(
299
303
  provider: InferenceProviderOrPolicy,
300
- task: "table-question-answering"
304
+ task: "table-question-answering",
301
305
  ): TableQuestionAnsweringTaskHelper & TaskProviderHelper;
302
306
  export function getProviderHelper(
303
307
  provider: InferenceProviderOrPolicy,
304
- task: "tabular-classification"
308
+ task: "tabular-classification",
305
309
  ): TabularClassificationTaskHelper & TaskProviderHelper;
306
310
  export function getProviderHelper(
307
311
  provider: InferenceProviderOrPolicy,
308
- task: "tabular-regression"
312
+ task: "tabular-regression",
309
313
  ): TabularRegressionTaskHelper & TaskProviderHelper;
310
314
  export function getProviderHelper(
311
315
  provider: InferenceProviderOrPolicy,
312
- task: "token-classification"
316
+ task: "token-classification",
313
317
  ): TokenClassificationTaskHelper & TaskProviderHelper;
314
318
  export function getProviderHelper(
315
319
  provider: InferenceProviderOrPolicy,
316
- task: "translation"
320
+ task: "translation",
317
321
  ): TranslationTaskHelper & TaskProviderHelper;
318
322
  export function getProviderHelper(
319
323
  provider: InferenceProviderOrPolicy,
320
- task: "summarization"
324
+ task: "summarization",
321
325
  ): SummarizationTaskHelper & TaskProviderHelper;
322
326
  export function getProviderHelper(
323
327
  provider: InferenceProviderOrPolicy,
324
- task: "visual-question-answering"
328
+ task: "visual-question-answering",
325
329
  ): VisualQuestionAnsweringTaskHelper & TaskProviderHelper;
326
330
  export function getProviderHelper(
327
331
  provider: InferenceProviderOrPolicy,
328
- task: InferenceTask | undefined
332
+ task: InferenceTask | undefined,
329
333
  ): TaskProviderHelper;
330
334
  export function getProviderHelper(
331
335
  provider: InferenceProviderOrPolicy,
332
- task: InferenceTask | undefined
336
+ task: InferenceTask | undefined,
333
337
  ): TaskProviderHelper {
334
338
  if ((provider === "hf-inference" && !task) || provider === "auto") {
335
339
  return new HFInference.HFInferenceTask();
336
340
  }
337
341
  if (!task) {
338
342
  throw new InferenceClientInputError(
339
- "you need to provide a task name when using an external provider, e.g. 'text-to-image'"
343
+ "you need to provide a task name when using an external provider, e.g. 'text-to-image'",
340
344
  );
341
345
  }
342
346
  if (!(provider in PROVIDERS)) {
343
347
  throw new InferenceClientInputError(
344
- `Provider '${provider}' not supported. Available providers: ${Object.keys(PROVIDERS)}`
348
+ `Provider '${provider}' not supported. Available providers: ${Object.keys(PROVIDERS)}`,
345
349
  );
346
350
  }
347
351
  const providerTasks = PROVIDERS[provider];
348
352
  if (!providerTasks || !(task in providerTasks)) {
349
353
  throw new InferenceClientInputError(
350
- `Task '${task}' not supported for provider '${provider}'. Available tasks: ${Object.keys(providerTasks ?? {})}`
354
+ `Task '${task}' not supported for provider '${provider}'. Available tasks: ${Object.keys(providerTasks ?? {})}`,
351
355
  );
352
356
  }
353
357
  return providerTasks[task] as TaskProviderHelper;
@@ -25,7 +25,7 @@ export async function makeRequestOptions(
25
25
  options?: Options & {
26
26
  /** In most cases (unless we pass a endpointUrl) we know the task */
27
27
  task?: InferenceTask;
28
- }
28
+ },
29
29
  ): Promise<{ url: string; info: RequestInit }> {
30
30
  const { model: maybeModel } = args;
31
31
  const provider = providerHelper.provider;
@@ -46,7 +46,7 @@ export async function makeRequestOptions(
46
46
  providerHelper,
47
47
  args,
48
48
  undefined,
49
- options
49
+ options,
50
50
  );
51
51
  }
52
52
 
@@ -71,7 +71,7 @@ export async function makeRequestOptions(
71
71
  status: "live",
72
72
  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
73
73
  task: task!,
74
- } satisfies InferenceProviderMappingEntry)
74
+ } satisfies InferenceProviderMappingEntry)
75
75
  : await getInferenceProviderMapping(
76
76
  {
77
77
  modelId: hfModel,
@@ -80,11 +80,11 @@ export async function makeRequestOptions(
80
80
  provider,
81
81
  accessToken: args.accessToken,
82
82
  },
83
- { fetch: options?.fetch }
84
- );
83
+ { fetch: options?.fetch },
84
+ );
85
85
  if (!inferenceProviderMapping) {
86
86
  throw new InferenceClientInputError(
87
- `We have not been able to find inference provider information for model ${hfModel}.`
87
+ `We have not been able to find inference provider information for model ${hfModel}.`,
88
88
  );
89
89
  }
90
90
 
@@ -94,7 +94,7 @@ export async function makeRequestOptions(
94
94
  providerHelper,
95
95
  args,
96
96
  inferenceProviderMapping,
97
- options
97
+ options,
98
98
  );
99
99
  }
100
100
 
@@ -113,7 +113,7 @@ export function makeRequestOptionsFromResolvedModel(
113
113
  options?: Options & {
114
114
  task?: InferenceTask;
115
115
  outputType?: OutputType;
116
- }
116
+ },
117
117
  ): { url: string; info: RequestInit } {
118
118
  const { accessToken, endpointUrl, provider: maybeProvider, model, urlTransform, ...remainingArgs } = args;
119
119
  void model;
@@ -154,7 +154,7 @@ export function makeRequestOptionsFromResolvedModel(
154
154
  accessToken,
155
155
  authMethod,
156
156
  },
157
- "data" in args && !!args.data
157
+ "data" in args && !!args.data,
158
158
  );
159
159
  if (billTo) {
160
160
  headers[HF_HEADER_X_BILL_TO] = billTo;
@@ -203,7 +203,7 @@ async function loadDefaultModel(task: InferenceTask): Promise<string> {
203
203
  const taskInfo = tasks[task];
204
204
  if ((taskInfo?.models.length ?? 0) <= 0) {
205
205
  throw new InferenceClientInputError(
206
- `No default model defined for task ${task}, please define the model explicitly.`
206
+ `No default model defined for task ${task}, please define the model explicitly.`,
207
207
  );
208
208
  }
209
209
  return taskInfo.models[0].id;
@@ -217,7 +217,7 @@ async function loadTaskInfo(): Promise<Record<string, { models: { id: string }[]
217
217
  throw new InferenceClientHubApiError(
218
218
  "Failed to load tasks definitions from Hugging Face Hub.",
219
219
  { url, method: "GET" },
220
- { requestId: res.headers.get("x-request-id") ?? "", status: res.status, body: await res.text() }
220
+ { requestId: res.headers.get("x-request-id") ?? "", status: res.status, body: await res.text() },
221
221
  );
222
222
  }
223
223
  return await res.json();
package/src/package.ts CHANGED
@@ -1,3 +1,3 @@
1
1
  // Generated file from package.json. Issues importing JSON directly when publishing on commonjs/ESM - see https://github.com/microsoft/TypeScript/issues/51783
2
- export const PACKAGE_VERSION = "4.13.10";
2
+ export const PACKAGE_VERSION = "4.13.12";
3
3
  export const PACKAGE_NAME = "@huggingface/inference";
@@ -66,7 +66,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
66
66
  response: BlackForestLabsResponse,
67
67
  url?: string,
68
68
  headers?: HeadersInit,
69
- outputType?: "url" | "blob" | "json"
69
+ outputType?: "url" | "blob" | "json",
70
70
  ): Promise<string | Blob | Record<string, unknown>> {
71
71
  const logger = getLogger();
72
72
  const urlObj = new URL(response.polling_url);
@@ -79,7 +79,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
79
79
  throw new InferenceClientProviderApiError(
80
80
  "Failed to fetch result from black forest labs API",
81
81
  { url: urlObj.toString(), method: "GET", headers: { "Content-Type": "application/json" } },
82
- { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
82
+ { requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() },
83
83
  );
84
84
  }
85
85
  const payload = await resp.json();
@@ -106,7 +106,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
106
106
  }
107
107
  }
108
108
  throw new InferenceClientProviderOutputError(
109
- `Timed out while waiting for the result from black forest labs API - aborting after 5 attempts`
109
+ `Timed out while waiting for the result from black forest labs API - aborting after 5 attempts`,
110
110
  );
111
111
  }
112
112
  }
@@ -32,6 +32,7 @@ export const HARDCODED_MODEL_INFERENCE_MAPPING: Record<
32
32
  nebius: {},
33
33
  novita: {},
34
34
  nscale: {},
35
+ nvidia: {},
35
36
  openai: {},
36
37
  publicai: {},
37
38
  ovhcloud: {},