@huggingface/inference 3.3.0 → 3.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -49,6 +49,7 @@ You can send inference requests to third-party providers with the inference clie
  Currently, we support the following providers:
  - [Fal.ai](https://fal.ai)
  - [Fireworks AI](https://fireworks.ai)
+ - [Nebius](https://studio.nebius.ai)
  - [Replicate](https://replicate.com)
  - [Sambanova](https://sambanova.ai)
  - [Together](https://together.xyz)
@@ -71,12 +72,13 @@ When authenticated with a third-party provider key, the request is made directly
  Only a subset of models are supported when requesting third-party providers. You can check the list of supported models per pipeline tasks here:
  - [Fal.ai supported models](https://huggingface.co/api/partners/fal-ai/models)
  - [Fireworks AI supported models](https://huggingface.co/api/partners/fireworks-ai/models)
+ - [Nebius supported models](https://huggingface.co/api/partners/nebius/models)
  - [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
  - [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
  - [Together supported models](https://huggingface.co/api/partners/together/models)
  - [HF Inference API (serverless)](https://huggingface.co/models?inference=warm&sort=trending)

- ❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
+ ❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
  This is not an issue for LLMs as everyone converged on the OpenAI API anyways, but can be more tricky for other tasks like "text-to-image" or "automatic-speech-recognition" where there exists no standard API. Let us know if any help is needed or if we can make things easier for you!

  👋**Want to add another provider?** Get in touch if you'd like to add support for another Inference provider, and/or request it on https://huggingface.co/spaces/huggingface/HuggingDiscussions/discussions/49
@@ -463,7 +465,7 @@ await hf.zeroShotImageClassification({
    model: 'openai/clip-vit-large-patch14-336',
    inputs: {
      image: await (await fetch('https://placekitten.com/300/300')).blob()
-   },
+   },
    parameters: {
      candidate_labels: ['cat', 'dog']
    }
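For context on what the README addition enables, here is a minimal sketch of requesting the new Nebius provider through the inference client, assuming a valid HF token in `HF_TOKEN`; the model ID is illustrative and must appear in the Nebius model mapping linked above.

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN);

// Routed via the HF router unless you authenticate with a Nebius key directly.
const out = await hf.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct", // illustrative HF model ID
  provider: "nebius",
  messages: [{ role: "user", content: "Hello, Nebius!" }],
});
console.log(out.choices[0].message.content);
```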
package/dist/index.cjs CHANGED
@@ -103,6 +103,9 @@ var HF_ROUTER_URL = "https://router.huggingface.co";
  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";

+ // src/providers/nebius.ts
+ var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";

@@ -122,7 +125,7 @@ function isUrl(modelOrUrl) {

  // package.json
  var name = "@huggingface/inference";
- var version = "3.3.0";
+ var version = "3.3.1";

  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -135,6 +138,7 @@ var HARDCODED_MODEL_ID_MAPPING = {
    "fal-ai": {},
    "fireworks-ai": {},
    "hf-inference": {},
+   nebius: {},
    replicate: {},
    sambanova: {},
    together: {}
@@ -191,7 +195,7 @@ async function makeRequestOptions(args, options) {
    const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
    let otherArgs = remainingArgs;
    const provider = maybeProvider ?? "hf-inference";
-   const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion: chatCompletion2 } = options ?? {};
+   const { forceTask, includeCredentials, taskHint, chatCompletion: chatCompletion2 } = options ?? {};
    if (endpointUrl && provider !== "hf-inference") {
      throw new Error(`Cannot use endpointUrl with a third-party provider.`);
    }
@@ -229,17 +233,6 @@ async function makeRequestOptions(args, options) {
    if (!binary) {
      headers["Content-Type"] = "application/json";
    }
-   if (provider === "hf-inference") {
-     if (wait_for_model) {
-       headers["X-Wait-For-Model"] = "true";
-     }
-     if (use_cache === false) {
-       headers["X-Use-Cache"] = "false";
-     }
-     if (dont_load_model) {
-       headers["X-Load-Model"] = "0";
-     }
-   }
    if (provider === "replicate") {
      headers["Prefer"] = "wait";
    }
@@ -258,7 +251,7 @@ async function makeRequestOptions(args, options) {
    method: "POST",
    body: binary ? args.data : JSON.stringify({
      ...otherArgs,
-     ...chatCompletion2 || provider === "together" ? { model } : void 0
+     ...chatCompletion2 || provider === "together" || provider === "nebius" ? { model } : void 0
    }),
    ...credentials ? { credentials } : void 0,
    signal: options?.signal
@@ -275,6 +268,19 @@ function makeUrl(params) {
      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FAL_AI_API_BASE_URL;
      return `${baseUrl}/${params.model}`;
    }
+   case "nebius": {
+     const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : NEBIUS_API_BASE_URL;
+     if (params.taskHint === "text-to-image") {
+       return `${baseUrl}/v1/images/generations`;
+     }
+     if (params.taskHint === "text-generation") {
+       if (params.chatCompletion) {
+         return `${baseUrl}/v1/chat/completions`;
+       }
+       return `${baseUrl}/v1/completions`;
+     }
+     return baseUrl;
+   }
    case "replicate": {
      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : REPLICATE_API_BASE_URL;
      if (params.model.includes(":")) {
@@ -341,11 +347,8 @@ async function loadTaskInfo() {
  async function request(args, options) {
    const { url, info } = await makeRequestOptions(args, options);
    const response = await (options?.fetch ?? fetch)(url, info);
-   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return request(args, {
-       ...options,
-       wait_for_model: true
-     });
+   if (options?.retry_on_error !== false && response.status === 503) {
+     return request(args, options);
    }
    if (!response.ok) {
      const contentType = response.headers.get("Content-Type");
@@ -474,11 +477,8 @@ function newMessage() {
  async function* streamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
-   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return yield* streamingRequest(args, {
-       ...options,
-       wait_for_model: true
-     });
+   if (options?.retry_on_error !== false && response.status === 503) {
+     return yield* streamingRequest(args, options);
    }
    if (!response.ok) {
      if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -769,13 +769,27 @@ async function objectDetection(args, options) {
  }

  // src/tasks/cv/textToImage.ts
+ function getResponseFormatArg(provider) {
+   switch (provider) {
+     case "fal-ai":
+       return { sync_mode: true };
+     case "nebius":
+       return { response_format: "b64_json" };
+     case "replicate":
+       return void 0;
+     case "together":
+       return { response_format: "base64" };
+     default:
+       return void 0;
+   }
+ }
  async function textToImage(args, options) {
-   const payload = args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate" ? {
+   const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
      ...omit(args, ["inputs", "parameters"]),
      ...args.parameters,
-     ...args.provider !== "replicate" ? { response_format: "base64" } : void 0,
+     ...getResponseFormatArg(args.provider),
      prompt: args.inputs
-   } : args;
+   };
    const res = await request(payload, {
      ...options,
      taskHint: "text-to-image"
@@ -1155,8 +1169,8 @@ async function chatCompletion(args, options) {
    taskHint: "text-generation",
    chatCompletion: true
  });
- const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai does not output a system_fingerprint
- (res.system_fingerprint === void 0 || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
+ const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+ (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
  if (!isValidOutput) {
    throw new InferenceOutputError("Expected ChatCompletionOutput");
  }
@@ -1290,6 +1304,7 @@ var HfInferenceEndpoint = class {
  var INFERENCE_PROVIDERS = [
    "fal-ai",
    "fireworks-ai",
+   "nebius",
    "hf-inference",
    "replicate",
    "sambanova",
package/dist/index.js CHANGED
@@ -48,6 +48,9 @@ var HF_ROUTER_URL = "https://router.huggingface.co";
  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";

+ // src/providers/nebius.ts
+ var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";

@@ -67,7 +70,7 @@ function isUrl(modelOrUrl) {

  // package.json
  var name = "@huggingface/inference";
- var version = "3.3.0";
+ var version = "3.3.1";

  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -80,6 +83,7 @@ var HARDCODED_MODEL_ID_MAPPING = {
    "fal-ai": {},
    "fireworks-ai": {},
    "hf-inference": {},
+   nebius: {},
    replicate: {},
    sambanova: {},
    together: {}
@@ -136,7 +140,7 @@ async function makeRequestOptions(args, options) {
    const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
    let otherArgs = remainingArgs;
    const provider = maybeProvider ?? "hf-inference";
-   const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion: chatCompletion2 } = options ?? {};
+   const { forceTask, includeCredentials, taskHint, chatCompletion: chatCompletion2 } = options ?? {};
    if (endpointUrl && provider !== "hf-inference") {
      throw new Error(`Cannot use endpointUrl with a third-party provider.`);
    }
@@ -174,17 +178,6 @@ async function makeRequestOptions(args, options) {
    if (!binary) {
      headers["Content-Type"] = "application/json";
    }
-   if (provider === "hf-inference") {
-     if (wait_for_model) {
-       headers["X-Wait-For-Model"] = "true";
-     }
-     if (use_cache === false) {
-       headers["X-Use-Cache"] = "false";
-     }
-     if (dont_load_model) {
-       headers["X-Load-Model"] = "0";
-     }
-   }
    if (provider === "replicate") {
      headers["Prefer"] = "wait";
    }
@@ -203,7 +196,7 @@ async function makeRequestOptions(args, options) {
    method: "POST",
    body: binary ? args.data : JSON.stringify({
      ...otherArgs,
-     ...chatCompletion2 || provider === "together" ? { model } : void 0
+     ...chatCompletion2 || provider === "together" || provider === "nebius" ? { model } : void 0
    }),
    ...credentials ? { credentials } : void 0,
    signal: options?.signal
@@ -220,6 +213,19 @@ function makeUrl(params) {
      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FAL_AI_API_BASE_URL;
      return `${baseUrl}/${params.model}`;
    }
+   case "nebius": {
+     const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : NEBIUS_API_BASE_URL;
+     if (params.taskHint === "text-to-image") {
+       return `${baseUrl}/v1/images/generations`;
+     }
+     if (params.taskHint === "text-generation") {
+       if (params.chatCompletion) {
+         return `${baseUrl}/v1/chat/completions`;
+       }
+       return `${baseUrl}/v1/completions`;
+     }
+     return baseUrl;
+   }
    case "replicate": {
      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : REPLICATE_API_BASE_URL;
      if (params.model.includes(":")) {
@@ -286,11 +292,8 @@ async function loadTaskInfo() {
  async function request(args, options) {
    const { url, info } = await makeRequestOptions(args, options);
    const response = await (options?.fetch ?? fetch)(url, info);
-   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return request(args, {
-       ...options,
-       wait_for_model: true
-     });
+   if (options?.retry_on_error !== false && response.status === 503) {
+     return request(args, options);
    }
    if (!response.ok) {
      const contentType = response.headers.get("Content-Type");
@@ -419,11 +422,8 @@ function newMessage() {
  async function* streamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
-   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return yield* streamingRequest(args, {
-       ...options,
-       wait_for_model: true
-     });
+   if (options?.retry_on_error !== false && response.status === 503) {
+     return yield* streamingRequest(args, options);
    }
    if (!response.ok) {
      if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -714,13 +714,27 @@ async function objectDetection(args, options) {
  }

  // src/tasks/cv/textToImage.ts
+ function getResponseFormatArg(provider) {
+   switch (provider) {
+     case "fal-ai":
+       return { sync_mode: true };
+     case "nebius":
+       return { response_format: "b64_json" };
+     case "replicate":
+       return void 0;
+     case "together":
+       return { response_format: "base64" };
+     default:
+       return void 0;
+   }
+ }
  async function textToImage(args, options) {
-   const payload = args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate" ? {
+   const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
      ...omit(args, ["inputs", "parameters"]),
      ...args.parameters,
-     ...args.provider !== "replicate" ? { response_format: "base64" } : void 0,
+     ...getResponseFormatArg(args.provider),
      prompt: args.inputs
-   } : args;
+   };
    const res = await request(payload, {
      ...options,
      taskHint: "text-to-image"
@@ -1100,8 +1114,8 @@ async function chatCompletion(args, options) {
    taskHint: "text-generation",
    chatCompletion: true
  });
- const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai does not output a system_fingerprint
- (res.system_fingerprint === void 0 || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
+ const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+ (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
  if (!isValidOutput) {
    throw new InferenceOutputError("Expected ChatCompletionOutput");
  }
@@ -1235,6 +1249,7 @@ var HfInferenceEndpoint = class {
  var INFERENCE_PROVIDERS = [
    "fal-ai",
    "fireworks-ai",
+   "nebius",
    "hf-inference",
    "replicate",
    "sambanova",
package/dist/src/lib/makeRequestOptions.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAOA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAqH7C"}
+ {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAQA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAwG7C"}
package/dist/src/providers/consts.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AACzB;;;;;;GAMG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAa7F,CAAC"}
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AACzB;;;;;;GAMG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAc7F,CAAC"}
package/dist/src/providers/nebius.d.ts ADDED
@@ -0,0 +1,18 @@
+ export declare const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+ /**
+  * See the registered mapping of HF model ID => Nebius model ID here:
+  *
+  * https://huggingface.co/api/partners/nebius/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Nebius and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Nebius, please open an issue on the present repo
+  *   and we will tag Nebius team members.
+  *
+  * Thanks!
+  */
+ //# sourceMappingURL=nebius.d.ts.map
package/dist/src/providers/nebius.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"nebius.d.ts","sourceRoot":"","sources":["../../../src/providers/nebius.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,mBAAmB,iCAAiC,CAAC;AAElE;;;;;;;;;;;;;;;GAeG"}
package/dist/src/tasks/custom/request.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CAmCZ"}
+ {"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CAgCZ"}
package/dist/src/tasks/custom/streamingRequest.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAsFnB"}
+ {"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAmFnB"}
package/dist/src/tasks/cv/textToImage.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAqB,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,gBAAgB,CAAC;AAW1D;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAoCzF"}
+ {"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAqB,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAqB,OAAO,EAAE,MAAM,aAAa,CAAC;AAIxE,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,gBAAgB,CAAC;AA0B1D;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAqCzF"}
package/dist/src/tasks/nlp/chatCompletion.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAoB/B"}
+ {"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAuB/B"}
package/dist/src/types.d.ts CHANGED
@@ -5,25 +5,9 @@ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
  export type ModelId = string;
  export interface Options {
      /**
-      * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+      * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
       */
      retry_on_error?: boolean;
-     /**
-      * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-      */
-     use_cache?: boolean;
-     /**
-      * (Default: false). Boolean. Do not load the model if it's not already available.
-      */
-     dont_load_model?: boolean;
-     /**
-      * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-      */
-     use_gpu?: boolean;
-     /**
-      * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-      */
-     wait_for_model?: boolean;
      /**
       * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
       */
@@ -38,7 +22,7 @@ export interface Options {
      includeCredentials?: string | boolean;
  }
  export type InferenceTask = Exclude<PipelineType, "other">;
- export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "replicate", "sambanova", "together"];
+ export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "nebius", "hf-inference", "replicate", "sambanova", "together"];
  export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
  export interface BaseArgs {
      /**
package/dist/src/types.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2FAOtB,CAAC;AACX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,qGAQtB,CAAC;AACX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@huggingface/inference",
-   "version": "3.3.0",
+   "version": "3.3.1",
    "packageManager": "pnpm@8.10.5",
    "license": "MIT",
    "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
package/src/lib/makeRequestOptions.ts CHANGED
@@ -1,5 +1,6 @@
  import { HF_HUB_URL, HF_ROUTER_URL } from "../config";
  import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
+ import { NEBIUS_API_BASE_URL } from "../providers/nebius";
  import { REPLICATE_API_BASE_URL } from "../providers/replicate";
  import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
  import { TOGETHER_API_BASE_URL } from "../providers/together";
@@ -38,8 +39,7 @@ export async function makeRequestOptions(
  let otherArgs = remainingArgs;
  const provider = maybeProvider ?? "hf-inference";

- const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
-   options ?? {};
+ const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};

  if (endpointUrl && provider !== "hf-inference") {
    throw new Error(`Cannot use endpointUrl with a third-party provider.`);
@@ -101,18 +101,6 @@ export async function makeRequestOptions(
    headers["Content-Type"] = "application/json";
  }

- if (provider === "hf-inference") {
-   if (wait_for_model) {
-     headers["X-Wait-For-Model"] = "true";
-   }
-   if (use_cache === false) {
-     headers["X-Use-Cache"] = "false";
-   }
-   if (dont_load_model) {
-     headers["X-Load-Model"] = "0";
-   }
- }
-
  if (provider === "replicate") {
    headers["Prefer"] = "wait";
  }
@@ -143,7 +131,7 @@ export async function makeRequestOptions(
    ? args.data
    : JSON.stringify({
        ...otherArgs,
-       ...(chatCompletion || provider === "together" ? { model } : undefined),
+       ...(chatCompletion || provider === "together" || provider === "nebius" ? { model } : undefined),
      }),
  ...(credentials ? { credentials } : undefined),
  signal: options?.signal,
@@ -172,6 +160,22 @@ function makeUrl(params: {
      : FAL_AI_API_BASE_URL;
    return `${baseUrl}/${params.model}`;
  }
+ case "nebius": {
+   const baseUrl = shouldProxy
+     ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
+     : NEBIUS_API_BASE_URL;
+
+   if (params.taskHint === "text-to-image") {
+     return `${baseUrl}/v1/images/generations`;
+   }
+   if (params.taskHint === "text-generation") {
+     if (params.chatCompletion) {
+       return `${baseUrl}/v1/chat/completions`;
+     }
+     return `${baseUrl}/v1/completions`;
+   }
+   return baseUrl;
+ }
  case "replicate": {
    const baseUrl = shouldProxy
      ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
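To make the new `case "nebius"` routing concrete, a standalone sketch of the URLs it resolves to in the direct (non-proxied) case:

```ts
const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";

// Mirrors the `case "nebius"` branch of makeUrl above.
function nebiusUrl(taskHint?: string, chatCompletion?: boolean): string {
  if (taskHint === "text-to-image") {
    return `${NEBIUS_API_BASE_URL}/v1/images/generations`;
  }
  if (taskHint === "text-generation") {
    return chatCompletion
      ? `${NEBIUS_API_BASE_URL}/v1/chat/completions`
      : `${NEBIUS_API_BASE_URL}/v1/completions`;
  }
  return NEBIUS_API_BASE_URL; // other tasks fall back to the base URL
}

console.log(nebiusUrl("text-generation", true)); // https://api.studio.nebius.ai/v1/chat/completions
console.log(nebiusUrl("text-to-image"));         // https://api.studio.nebius.ai/v1/images/generations
```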
package/src/providers/consts.ts CHANGED
@@ -19,6 +19,7 @@ export const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelI
  "fal-ai": {},
  "fireworks-ai": {},
  "hf-inference": {},
+ nebius: {},
  replicate: {},
  sambanova: {},
  together: {},
package/src/providers/nebius.ts ADDED
@@ -0,0 +1,18 @@
+ export const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
+ /**
+  * See the registered mapping of HF model ID => Nebius model ID here:
+  *
+  * https://huggingface.co/api/partners/nebius/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Nebius and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Nebius, please open an issue on the present repo
+  *   and we will tag Nebius team members.
+  *
+  * Thanks!
+  */
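As the doc comment above suggests, a model that is not yet registered on huggingface.co can be tried locally by editing `HARDCODED_MODEL_ID_MAPPING` in consts.ts. A dev-only sketch; both model IDs are illustrative, and the type import assumes `InferenceProvider` is re-exported from the package root:

```ts
import type { InferenceProvider } from "@huggingface/inference";

type ModelId = string;
type ProviderId = string;

// Dev-only: map an HF Hub model ID to the ID Nebius expects.
const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelId, ProviderId>> = {
  "fal-ai": {},
  "fireworks-ai": {},
  "hf-inference": {},
  nebius: {
    "meta-llama/Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  },
  replicate: {},
  sambanova: {},
  together: {},
};
```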
package/src/tasks/custom/request.ts CHANGED
@@ -18,11 +18,8 @@ export async function request<T>(
  const { url, info } = await makeRequestOptions(args, options);
  const response = await (options?.fetch ?? fetch)(url, info);

- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-   return request(args, {
-     ...options,
-     wait_for_model: true,
-   });
+ if (options?.retry_on_error !== false && response.status === 503) {
+   return request(args, options);
  }

  if (!response.ok) {
package/src/tasks/custom/streamingRequest.ts CHANGED
@@ -20,11 +20,8 @@ export async function* streamingRequest<T>(
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
  const response = await (options?.fetch ?? fetch)(url, info);

- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-   return yield* streamingRequest(args, {
-     ...options,
-     wait_for_model: true,
-   });
+ if (options?.retry_on_error !== false && response.status === 503) {
+   return yield* streamingRequest(args, options);
  }
  if (!response.ok) {
    if (response.headers.get("Content-Type")?.startsWith("application/json")) {
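`streamingRequest` keeps the same simplified 503 retry, so streaming consumers are unaffected beyond that. A hedged sketch of streaming chat completion through the new provider; the model ID is illustrative:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN);

// Deltas arrive incrementally; a 503 is retried internally unless
// retry_on_error is set to false.
for await (const chunk of hf.chatCompletionStream({
  model: "meta-llama/Llama-3.1-8B-Instruct", // illustrative HF model ID
  provider: "nebius",
  messages: [{ role: "user", content: "Write one haiku." }],
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```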
package/src/tasks/cv/textToImage.ts CHANGED
@@ -1,6 +1,6 @@
  import type { TextToImageInput, TextToImageOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
- import type { BaseArgs, Options } from "../../types";
+ import type { BaseArgs, InferenceProvider, Options } from "../../types";
  import { omit } from "../../utils/omit";
  import { request } from "../custom/request";

@@ -15,24 +15,40 @@ interface OutputUrlImageGeneration {
    output: string[];
  }

+ function getResponseFormatArg(provider: InferenceProvider) {
+   switch (provider) {
+     case "fal-ai":
+       return { sync_mode: true };
+     case "nebius":
+       return { response_format: "b64_json" };
+     case "replicate":
+       return undefined;
+     case "together":
+       return { response_format: "base64" };
+     default:
+       return undefined;
+   }
+ }
+
  /**
   * This task reads some text input and outputs an image.
   * Recommended model: stabilityai/stable-diffusion-2
   */
  export async function textToImage(args: TextToImageArgs, options?: Options): Promise<Blob> {
    const payload =
-     args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate"
-       ? {
+     !args.provider || args.provider === "hf-inference" || args.provider === "sambanova"
+       ? args
+       : {
          ...omit(args, ["inputs", "parameters"]),
          ...args.parameters,
-         ...(args.provider !== "replicate" ? { response_format: "base64" } : undefined),
+         ...getResponseFormatArg(args.provider),
          prompt: args.inputs,
-       }
-       : args;
+       };
    const res = await request<TextToImageOutput | Base64ImageGeneration | OutputUrlImageGeneration>(payload, {
      ...options,
      taskHint: "text-to-image",
    });
+
    if (res && typeof res === "object") {
      if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
        const image = await fetch(res.images[0].url);
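With this refactor, callers never pass `response_format` themselves: `getResponseFormatArg` injects the provider-specific value (`b64_json` for Nebius, `base64` for Together, `sync_mode: true` for Fal.ai) and the task still resolves to a `Blob`. A usage sketch; the model ID is illustrative:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN);

// response_format is added automatically per provider; the base64
// payload is decoded into a Blob by the task.
const image: Blob = await hf.textToImage({
  model: "black-forest-labs/FLUX.1-schnell", // illustrative HF model ID
  provider: "nebius",
  inputs: "An astronaut riding a horse on Mars",
});
```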
package/src/tasks/nlp/chatCompletion.ts CHANGED
@@ -15,14 +15,17 @@ export async function chatCompletion(
    taskHint: "text-generation",
    chatCompletion: true,
  });
+
  const isValidOutput =
    typeof res === "object" &&
    Array.isArray(res?.choices) &&
    typeof res?.created === "number" &&
    typeof res?.id === "string" &&
    typeof res?.model === "string" &&
-   /// Together.ai does not output a system_fingerprint
-   (res.system_fingerprint === undefined || typeof res.system_fingerprint === "string") &&
+   /// Together.ai and Nebius do not output a system_fingerprint
+   (res.system_fingerprint === undefined ||
+     res.system_fingerprint === null ||
+     typeof res.system_fingerprint === "string") &&
    typeof res?.usage === "object";

  if (!isValidOutput) {
package/src/types.ts CHANGED
@@ -7,26 +7,10 @@ export type ModelId = string;

  export interface Options {
      /**
-      * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+      * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
       */
      retry_on_error?: boolean;
-     /**
-      * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
-      */
-     use_cache?: boolean;
-     /**
-      * (Default: false). Boolean. Do not load the model if it's not already available.
-      */
-     dont_load_model?: boolean;
-     /**
-      * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
-      */
-     use_gpu?: boolean;

-     /**
-      * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
-      */
-     wait_for_model?: boolean;
      /**
       * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
       */
@@ -47,6 +31,7 @@ export type InferenceTask = Exclude<PipelineType, "other">;
  export const INFERENCE_PROVIDERS = [
      "fal-ai",
      "fireworks-ai",
+     "nebius",
      "hf-inference",
      "replicate",
      "sambanova",