@huggingface/inference 3.3.0 → 3.3.2

This diff shows the contents of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
package/README.md CHANGED
@@ -49,6 +49,7 @@ You can send inference requests to third-party providers with the inference clie
  Currently, we support the following providers:
  - [Fal.ai](https://fal.ai)
  - [Fireworks AI](https://fireworks.ai)
+ - [Nebius](https://studio.nebius.ai)
  - [Replicate](https://replicate.com)
  - [Sambanova](https://sambanova.ai)
  - [Together](https://together.xyz)
@@ -71,12 +72,13 @@ When authenticated with a third-party provider key, the request is made directly
  Only a subset of models are supported when requesting third-party providers. You can check the list of supported models per pipeline tasks here:
  - [Fal.ai supported models](https://huggingface.co/api/partners/fal-ai/models)
  - [Fireworks AI supported models](https://huggingface.co/api/partners/fireworks-ai/models)
+ - [Nebius supported models](https://huggingface.co/api/partners/nebius/models)
  - [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
  - [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
  - [Together supported models](https://huggingface.co/api/partners/together/models)
  - [HF Inference API (serverless)](https://huggingface.co/models?inference=warm&sort=trending)

- ❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
+ ❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
  This is not an issue for LLMs as everyone converged on the OpenAI API anyways, but can be more tricky for other tasks like "text-to-image" or "automatic-speech-recognition" where there exists no standard API. Let us know if any help is needed or if we can make things easier for you!

  👋**Want to add another provider?** Get in touch if you'd like to add support for another Inference provider, and/or request it on https://huggingface.co/spaces/huggingface/HuggingDiscussions/discussions/49
@@ -123,7 +125,7 @@ for await (const output of hf.textGenerationStream({

  ### Text Generation (Chat Completion API Compatible)

- Using the `chatCompletion` method, you can generate text with models compatible with the OpenAI Chat Completion API. All models served by [TGI](https://huggingface.co/docs/text-generation-inference/) on Hugging Face support Messages API.
+ Using the `chatCompletion` method, you can generate text with models compatible with the OpenAI Chat Completion API. All models served by [TGI](https://api-inference.huggingface.co/framework/text-generation-inference) on Hugging Face support Messages API.

  [Demo](https://huggingface.co/spaces/huggingfacejs/streaming-chat-completion)

@@ -463,7 +465,7 @@ await hf.zeroShotImageClassification({
  model: 'openai/clip-vit-large-patch14-336',
  inputs: {
  image: await (await fetch('https://placekitten.com/300/300')).blob()
- },
+ },
  parameters: {
  candidate_labels: ['cat', 'dog']
  }
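The README hunks above only update the provider lists and links. As a quick orientation, here is a minimal sketch (not part of the diff) of what the newly added Nebius provider enables; the token and model ID are placeholders, and actual availability depends on the Nebius model mapping linked above.

```ts
import { HfInference } from "@huggingface/inference";

// Placeholder token and illustrative model ID, for demonstration only;
// see https://huggingface.co/api/partners/nebius/models for the real mapping.
const hf = new HfInference("hf_xxxxxxxxxxxxxxxx");

const out = await hf.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct", // must be registered in the Nebius mapping
  provider: "nebius",                        // provider added in this release
  messages: [{ role: "user", content: "Say hello in one sentence." }],
  max_tokens: 64,
});

console.log(out.choices[0].message.content);
```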
package/dist/index.cjs CHANGED
@@ -98,11 +98,13 @@ __export(tasks_exports, {

  // src/config.ts
  var HF_HUB_URL = "https://huggingface.co";
- var HF_ROUTER_URL = "https://router.huggingface.co";

  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";

+ // src/providers/nebius.ts
+ var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";

@@ -122,7 +124,7 @@ function isUrl(modelOrUrl) {

  // package.json
  var name = "@huggingface/inference";
- var version = "3.3.0";
+ var version = "3.3.2";

  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -135,6 +137,7 @@ var HARDCODED_MODEL_ID_MAPPING = {
  "fal-ai": {},
  "fireworks-ai": {},
  "hf-inference": {},
+ nebius: {},
  replicate: {},
  sambanova: {},
  together: {}
@@ -185,13 +188,13 @@ async function getProviderModelId(params, args, options = {}) {
  }

  // src/lib/makeRequestOptions.ts
- var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
+ var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
  var tasks = null;
  async function makeRequestOptions(args, options) {
  const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
  let otherArgs = remainingArgs;
  const provider = maybeProvider ?? "hf-inference";
- const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion: chatCompletion2 } = options ?? {};
+ const { forceTask, includeCredentials, taskHint, chatCompletion: chatCompletion2 } = options ?? {};
  if (endpointUrl && provider !== "hf-inference") {
  throw new Error(`Cannot use endpointUrl with a third-party provider.`);
  }
@@ -229,17 +232,6 @@ async function makeRequestOptions(args, options) {
  if (!binary) {
  headers["Content-Type"] = "application/json";
  }
- if (provider === "hf-inference") {
- if (wait_for_model) {
- headers["X-Wait-For-Model"] = "true";
- }
- if (use_cache === false) {
- headers["X-Use-Cache"] = "false";
- }
- if (dont_load_model) {
- headers["X-Load-Model"] = "0";
- }
- }
  if (provider === "replicate") {
  headers["Prefer"] = "wait";
  }
@@ -258,7 +250,7 @@ async function makeRequestOptions(args, options) {
  method: "POST",
  body: binary ? args.data : JSON.stringify({
  ...otherArgs,
- ...chatCompletion2 || provider === "together" ? { model } : void 0
+ ...chatCompletion2 || provider === "together" || provider === "nebius" ? { model } : void 0
  }),
  ...credentials ? { credentials } : void 0,
  signal: options?.signal
@@ -275,6 +267,19 @@ function makeUrl(params) {
  const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FAL_AI_API_BASE_URL;
  return `${baseUrl}/${params.model}`;
  }
+ case "nebius": {
+ const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : NEBIUS_API_BASE_URL;
+ if (params.taskHint === "text-to-image") {
+ return `${baseUrl}/v1/images/generations`;
+ }
+ if (params.taskHint === "text-generation") {
+ if (params.chatCompletion) {
+ return `${baseUrl}/v1/chat/completions`;
+ }
+ return `${baseUrl}/v1/completions`;
+ }
+ return baseUrl;
+ }
  case "replicate": {
  const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : REPLICATE_API_BASE_URL;
  if (params.model.includes(":")) {
@@ -341,11 +346,8 @@ async function loadTaskInfo() {
  async function request(args, options) {
  const { url, info } = await makeRequestOptions(args, options);
  const response = await (options?.fetch ?? fetch)(url, info);
- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return request(args, {
- ...options,
- wait_for_model: true
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return request(args, options);
  }
  if (!response.ok) {
  const contentType = response.headers.get("Content-Type");
@@ -474,11 +476,8 @@ function newMessage() {
  async function* streamingRequest(args, options) {
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
  const response = await (options?.fetch ?? fetch)(url, info);
- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return yield* streamingRequest(args, {
- ...options,
- wait_for_model: true
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return yield* streamingRequest(args, options);
  }
  if (!response.ok) {
  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -769,13 +768,27 @@ async function objectDetection(args, options) {
  }

  // src/tasks/cv/textToImage.ts
+ function getResponseFormatArg(provider) {
+ switch (provider) {
+ case "fal-ai":
+ return { sync_mode: true };
+ case "nebius":
+ return { response_format: "b64_json" };
+ case "replicate":
+ return void 0;
+ case "together":
+ return { response_format: "base64" };
+ default:
+ return void 0;
+ }
+ }
  async function textToImage(args, options) {
- const payload = args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate" ? {
+ const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
  ...omit(args, ["inputs", "parameters"]),
  ...args.parameters,
- ...args.provider !== "replicate" ? { response_format: "base64" } : void 0,
+ ...getResponseFormatArg(args.provider),
  prompt: args.inputs
- } : args;
+ };
  const res = await request(payload, {
  ...options,
  taskHint: "text-to-image"
@@ -1155,8 +1168,8 @@ async function chatCompletion(args, options) {
  taskHint: "text-generation",
  chatCompletion: true
  });
- const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai does not output a system_fingerprint
- (res.system_fingerprint === void 0 || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
+ const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+ (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected ChatCompletionOutput");
  }
@@ -1290,6 +1303,7 @@ var HfInferenceEndpoint = class {
  var INFERENCE_PROVIDERS = [
  "fal-ai",
  "fireworks-ai",
+ "nebius",
  "hf-inference",
  "replicate",
  "sambanova",
package/dist/index.js CHANGED
@@ -43,11 +43,13 @@ __export(tasks_exports, {

  // src/config.ts
  var HF_HUB_URL = "https://huggingface.co";
- var HF_ROUTER_URL = "https://router.huggingface.co";

  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";

+ // src/providers/nebius.ts
+ var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";

@@ -67,7 +69,7 @@ function isUrl(modelOrUrl) {

  // package.json
  var name = "@huggingface/inference";
- var version = "3.3.0";
+ var version = "3.3.2";

  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -80,6 +82,7 @@ var HARDCODED_MODEL_ID_MAPPING = {
  "fal-ai": {},
  "fireworks-ai": {},
  "hf-inference": {},
+ nebius: {},
  replicate: {},
  sambanova: {},
  together: {}
@@ -130,13 +133,13 @@ async function getProviderModelId(params, args, options = {}) {
  }

  // src/lib/makeRequestOptions.ts
- var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
+ var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
  var tasks = null;
  async function makeRequestOptions(args, options) {
  const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
  let otherArgs = remainingArgs;
  const provider = maybeProvider ?? "hf-inference";
- const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion: chatCompletion2 } = options ?? {};
+ const { forceTask, includeCredentials, taskHint, chatCompletion: chatCompletion2 } = options ?? {};
  if (endpointUrl && provider !== "hf-inference") {
  throw new Error(`Cannot use endpointUrl with a third-party provider.`);
  }
@@ -174,17 +177,6 @@ async function makeRequestOptions(args, options) {
  if (!binary) {
  headers["Content-Type"] = "application/json";
  }
- if (provider === "hf-inference") {
- if (wait_for_model) {
- headers["X-Wait-For-Model"] = "true";
- }
- if (use_cache === false) {
- headers["X-Use-Cache"] = "false";
- }
- if (dont_load_model) {
- headers["X-Load-Model"] = "0";
- }
- }
  if (provider === "replicate") {
  headers["Prefer"] = "wait";
  }
@@ -203,7 +195,7 @@ async function makeRequestOptions(args, options) {
  method: "POST",
  body: binary ? args.data : JSON.stringify({
  ...otherArgs,
- ...chatCompletion2 || provider === "together" ? { model } : void 0
+ ...chatCompletion2 || provider === "together" || provider === "nebius" ? { model } : void 0
  }),
  ...credentials ? { credentials } : void 0,
  signal: options?.signal
@@ -220,6 +212,19 @@ function makeUrl(params) {
  const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FAL_AI_API_BASE_URL;
  return `${baseUrl}/${params.model}`;
  }
+ case "nebius": {
+ const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : NEBIUS_API_BASE_URL;
+ if (params.taskHint === "text-to-image") {
+ return `${baseUrl}/v1/images/generations`;
+ }
+ if (params.taskHint === "text-generation") {
+ if (params.chatCompletion) {
+ return `${baseUrl}/v1/chat/completions`;
+ }
+ return `${baseUrl}/v1/completions`;
+ }
+ return baseUrl;
+ }
  case "replicate": {
  const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : REPLICATE_API_BASE_URL;
  if (params.model.includes(":")) {
@@ -286,11 +291,8 @@ async function loadTaskInfo() {
  async function request(args, options) {
  const { url, info } = await makeRequestOptions(args, options);
  const response = await (options?.fetch ?? fetch)(url, info);
- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return request(args, {
- ...options,
- wait_for_model: true
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return request(args, options);
  }
  if (!response.ok) {
  const contentType = response.headers.get("Content-Type");
@@ -419,11 +421,8 @@ function newMessage() {
  async function* streamingRequest(args, options) {
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
  const response = await (options?.fetch ?? fetch)(url, info);
- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return yield* streamingRequest(args, {
- ...options,
- wait_for_model: true
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return yield* streamingRequest(args, options);
  }
  if (!response.ok) {
  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -714,13 +713,27 @@ async function objectDetection(args, options) {
  }

  // src/tasks/cv/textToImage.ts
+ function getResponseFormatArg(provider) {
+ switch (provider) {
+ case "fal-ai":
+ return { sync_mode: true };
+ case "nebius":
+ return { response_format: "b64_json" };
+ case "replicate":
+ return void 0;
+ case "together":
+ return { response_format: "base64" };
+ default:
+ return void 0;
+ }
+ }
  async function textToImage(args, options) {
- const payload = args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate" ? {
+ const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
  ...omit(args, ["inputs", "parameters"]),
  ...args.parameters,
- ...args.provider !== "replicate" ? { response_format: "base64" } : void 0,
+ ...getResponseFormatArg(args.provider),
  prompt: args.inputs
- } : args;
+ };
  const res = await request(payload, {
  ...options,
  taskHint: "text-to-image"
@@ -1100,8 +1113,8 @@ async function chatCompletion(args, options) {
  taskHint: "text-generation",
  chatCompletion: true
  });
- const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai does not output a system_fingerprint
- (res.system_fingerprint === void 0 || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
+ const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+ (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected ChatCompletionOutput");
  }
@@ -1235,6 +1248,7 @@ var HfInferenceEndpoint = class {
  var INFERENCE_PROVIDERS = [
  "fal-ai",
  "fireworks-ai",
+ "nebius",
  "hf-inference",
  "replicate",
  "sambanova",
@@ -1,3 +1,2 @@
  export declare const HF_HUB_URL = "https://huggingface.co";
- export declare const HF_ROUTER_URL = "https://router.huggingface.co";
  //# sourceMappingURL=config.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC;AACnD,eAAO,MAAM,aAAa,kCAAkC,CAAC"}
+ {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAOA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAqH7C"}
+ {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAQA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAwG7C"}
@@ -1 +1 @@
- {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AACzB;;;;;;GAMG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAa7F,CAAC"}
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AACzB;;;;;;GAMG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAc7F,CAAC"}
@@ -0,0 +1,18 @@
+ export declare const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+ /**
+ * See the registered mapping of HF model ID => Nebius model ID here:
+ *
+ * https://huggingface.co/api/partners/nebius/models
+ *
+ * This is a publicly available mapping.
+ *
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+ *
+ * - If you work at Nebius and want to update this mapping, please use the model mapping API we provide on huggingface.co
+ * - If you're a community member and want to add a new supported HF model to Nebius, please open an issue on the present repo
+ * and we will tag Nebius team members.
+ *
+ * Thanks!
+ */
+ //# sourceMappingURL=nebius.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"nebius.d.ts","sourceRoot":"","sources":["../../../src/providers/nebius.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,mBAAmB,iCAAiC,CAAC;AAElE;;;;;;;;;;;;;;;GAeG"}
@@ -1 +1 @@
- {"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CAmCZ"}
+ {"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;GAEG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CAgCZ"}
@@ -1 +1 @@
- {"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAsFnB"}
+ {"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAKvE;;GAEG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,IAAI,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IAC9B,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAmFnB"}
@@ -1 +1 @@
- {"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAqB,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,gBAAgB,CAAC;AAW1D;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAoCzF"}
+ {"version":3,"file":"textToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/textToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAqB,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAqB,OAAO,EAAE,MAAM,aAAa,CAAC;AAIxE,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,gBAAgB,CAAC;AA0B1D;;;GAGG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAqCzF"}
@@ -1 +1 @@
- {"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAoB/B"}
+ {"version":3,"file":"chatCompletion.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/chatCompletion.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAuB/B"}
@@ -5,25 +5,9 @@ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
  export type ModelId = string;
  export interface Options {
  /**
- * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+ * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
  */
  retry_on_error?: boolean;
- /**
- * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
- */
- use_cache?: boolean;
- /**
- * (Default: false). Boolean. Do not load the model if it's not already available.
- */
- dont_load_model?: boolean;
- /**
- * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
- */
- use_gpu?: boolean;
- /**
- * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
- */
- wait_for_model?: boolean;
  /**
  * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
  */
@@ -38,7 +22,7 @@ export interface Options {
  includeCredentials?: string | boolean;
  }
  export type InferenceTask = Exclude<PipelineType, "other">;
- export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "replicate", "sambanova", "together"];
+ export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "nebius", "hf-inference", "replicate", "sambanova", "together"];
  export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
  export interface BaseArgs {
  /**
@@ -1 +1 @@
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2FAOtB,CAAC;AACX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,qGAQtB,CAAC;AACX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@huggingface/inference",
- "version": "3.3.0",
+ "version": "3.3.2",
  "packageManager": "pnpm@8.10.5",
  "license": "MIT",
  "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
package/src/config.ts CHANGED
@@ -1,2 +1 @@
  export const HF_HUB_URL = "https://huggingface.co";
- export const HF_ROUTER_URL = "https://router.huggingface.co";
@@ -1,5 +1,6 @@
- import { HF_HUB_URL, HF_ROUTER_URL } from "../config";
+ import { HF_HUB_URL } from "../config";
  import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
+ import { NEBIUS_API_BASE_URL } from "../providers/nebius";
  import { REPLICATE_API_BASE_URL } from "../providers/replicate";
  import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
  import { TOGETHER_API_BASE_URL } from "../providers/together";
@@ -10,7 +11,7 @@ import { isUrl } from "./isUrl";
  import { version as packageVersion, name as packageName } from "../../package.json";
  import { getProviderModelId } from "./getProviderModelId";

- const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
+ const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;

  /**
  * Lazy-loaded from huggingface.co/api/tasks when needed
@@ -38,8 +39,7 @@ export async function makeRequestOptions(
  let otherArgs = remainingArgs;
  const provider = maybeProvider ?? "hf-inference";

- const { forceTask, includeCredentials, taskHint, wait_for_model, use_cache, dont_load_model, chatCompletion } =
- options ?? {};
+ const { forceTask, includeCredentials, taskHint, chatCompletion } = options ?? {};

  if (endpointUrl && provider !== "hf-inference") {
  throw new Error(`Cannot use endpointUrl with a third-party provider.`);
@@ -101,18 +101,6 @@ export async function makeRequestOptions(
  headers["Content-Type"] = "application/json";
  }

- if (provider === "hf-inference") {
- if (wait_for_model) {
- headers["X-Wait-For-Model"] = "true";
- }
- if (use_cache === false) {
- headers["X-Use-Cache"] = "false";
- }
- if (dont_load_model) {
- headers["X-Load-Model"] = "0";
- }
- }
-
  if (provider === "replicate") {
  headers["Prefer"] = "wait";
  }
@@ -143,7 +131,7 @@ export async function makeRequestOptions(
  ? args.data
  : JSON.stringify({
  ...otherArgs,
- ...(chatCompletion || provider === "together" ? { model } : undefined),
+ ...(chatCompletion || provider === "together" || provider === "nebius" ? { model } : undefined),
  }),
  ...(credentials ? { credentials } : undefined),
  signal: options?.signal,
@@ -172,6 +160,22 @@ function makeUrl(params: {
  : FAL_AI_API_BASE_URL;
  return `${baseUrl}/${params.model}`;
  }
+ case "nebius": {
+ const baseUrl = shouldProxy
+ ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
+ : NEBIUS_API_BASE_URL;
+
+ if (params.taskHint === "text-to-image") {
+ return `${baseUrl}/v1/images/generations`;
+ }
+ if (params.taskHint === "text-generation") {
+ if (params.chatCompletion) {
+ return `${baseUrl}/v1/chat/completions`;
+ }
+ return `${baseUrl}/v1/completions`;
+ }
+ return baseUrl;
+ }
  case "replicate": {
  const baseUrl = shouldProxy
  ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
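For readability, here is a standalone sketch of the routing the new `nebius` case performs for direct (non-proxied) calls. `makeUrl` itself is internal and not exported, so the helper below is purely illustrative.

```ts
// Illustrative only: mirrors the branch above, not an exported API.
function nebiusUrl(taskHint?: string, chatCompletion?: boolean): string {
  const baseUrl = "https://api.studio.nebius.ai";
  if (taskHint === "text-to-image") return `${baseUrl}/v1/images/generations`;
  if (taskHint === "text-generation") {
    return chatCompletion ? `${baseUrl}/v1/chat/completions` : `${baseUrl}/v1/completions`;
  }
  return baseUrl;
}

console.log(nebiusUrl("text-generation", true));
// -> https://api.studio.nebius.ai/v1/chat/completions
```

When the request is proxied through Hugging Face, the same paths are appended to the `HF_HUB_INFERENCE_PROXY_TEMPLATE` URL instead.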
@@ -19,6 +19,7 @@ export const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelI
  "fal-ai": {},
  "fireworks-ai": {},
  "hf-inference": {},
+ nebius: {},
  replicate: {},
  sambanova: {},
  together: {},
@@ -0,0 +1,18 @@
+ export const NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+
+ /**
+ * See the registered mapping of HF model ID => Nebius model ID here:
+ *
+ * https://huggingface.co/api/partners/nebius/models
+ *
+ * This is a publicly available mapping.
+ *
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+ *
+ * - If you work at Nebius and want to update this mapping, please use the model mapping API we provide on huggingface.co
+ * - If you're a community member and want to add a new supported HF model to Nebius, please open an issue on the present repo
+ * and we will tag Nebius team members.
+ *
+ * Thanks!
+ */
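The dev workflow described in that comment amounts to editing `HARDCODED_MODEL_ID_MAPPING` in `src/providers/consts.ts` locally. A hedged sketch with placeholder model IDs (not a real registered pair):

```ts
// src/providers/consts.ts (local, dev-only edit); the IDs below are placeholders.
export const HARDCODED_MODEL_ID_MAPPING: Record<string, Record<string, string>> = {
  // ...other providers stay empty...
  nebius: {
    // HF model ID -> provider-side model ID
    "my-org/my-model": "my-model-as-named-on-nebius",
  },
};
```

The real declaration is typed as `Record<InferenceProvider, Record<ModelId, string>>`, as the consts.ts hunk above shows.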
@@ -18,11 +18,8 @@ export async function request<T>(
  const { url, info } = await makeRequestOptions(args, options);
  const response = await (options?.fetch ?? fetch)(url, info);

- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return request(args, {
- ...options,
- wait_for_model: true,
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return request(args, options);
  }

  if (!response.ok) {
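Since `wait_for_model` is gone, a 503 now simply triggers a retry of the identical request unless the caller opts out. A minimal sketch of opting out (placeholder token; any task method that accepts `Options` works the same way):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_xxxxxxxxxxxxxxxx"); // placeholder token

// retry_on_error defaults to true; pass false to surface the 503 to the caller
// instead of re-issuing the same request.
const out = await hf.textGeneration(
  { model: "gpt2", inputs: "Hello, my name is" },
  { retry_on_error: false }
);

console.log(out.generated_text);
```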
@@ -20,11 +20,8 @@ export async function* streamingRequest<T>(
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
  const response = await (options?.fetch ?? fetch)(url, info);

- if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return yield* streamingRequest(args, {
- ...options,
- wait_for_model: true,
- });
+ if (options?.retry_on_error !== false && response.status === 503) {
+ return yield* streamingRequest(args, options);
  }
  if (!response.ok) {
  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -1,6 +1,6 @@
  import type { TextToImageInput, TextToImageOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
- import type { BaseArgs, Options } from "../../types";
+ import type { BaseArgs, InferenceProvider, Options } from "../../types";
  import { omit } from "../../utils/omit";
  import { request } from "../custom/request";

@@ -15,24 +15,40 @@ interface OutputUrlImageGeneration {
  output: string[];
  }

+ function getResponseFormatArg(provider: InferenceProvider) {
+ switch (provider) {
+ case "fal-ai":
+ return { sync_mode: true };
+ case "nebius":
+ return { response_format: "b64_json" };
+ case "replicate":
+ return undefined;
+ case "together":
+ return { response_format: "base64" };
+ default:
+ return undefined;
+ }
+ }
+
  /**
  * This task reads some text input and outputs an image.
  * Recommended model: stabilityai/stable-diffusion-2
  */
  export async function textToImage(args: TextToImageArgs, options?: Options): Promise<Blob> {
  const payload =
- args.provider === "together" || args.provider === "fal-ai" || args.provider === "replicate"
- ? {
+ !args.provider || args.provider === "hf-inference" || args.provider === "sambanova"
+ ? args
+ : {
  ...omit(args, ["inputs", "parameters"]),
  ...args.parameters,
- ...(args.provider !== "replicate" ? { response_format: "base64" } : undefined),
+ ...getResponseFormatArg(args.provider),
  prompt: args.inputs,
- }
- : args;
+ };
  const res = await request<TextToImageOutput | Base64ImageGeneration | OutputUrlImageGeneration>(payload, {
  ...options,
  taskHint: "text-to-image",
  });
+
  if (res && typeof res === "object") {
  if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
  const image = await fetch(res.images[0].url);
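In practice, for any third-party provider the `inputs`/`parameters` shape is now flattened into a `prompt` payload with a provider-specific response format. A minimal sketch of a call that exercises the new Nebius branch (placeholder token; illustrative model ID that must be registered in the Nebius mapping; parameter names follow the HF text-to-image spec):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_xxxxxxxxxxxxxxxx"); // placeholder token

// With provider "nebius", the payload sent upstream becomes
// { prompt, ...parameters, response_format: "b64_json", model }.
const image: Blob = await hf.textToImage({
  model: "black-forest-labs/FLUX.1-schnell", // illustrative model ID
  provider: "nebius",
  inputs: "An astronaut riding a horse, in watercolor",
  parameters: { num_inference_steps: 4 },
});
```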
@@ -15,14 +15,17 @@ export async function chatCompletion(
  taskHint: "text-generation",
  chatCompletion: true,
  });
+
  const isValidOutput =
  typeof res === "object" &&
  Array.isArray(res?.choices) &&
  typeof res?.created === "number" &&
  typeof res?.id === "string" &&
  typeof res?.model === "string" &&
- /// Together.ai does not output a system_fingerprint
- (res.system_fingerprint === undefined || typeof res.system_fingerprint === "string") &&
+ /// Together.ai and Nebius do not output a system_fingerprint
+ (res.system_fingerprint === undefined ||
+ res.system_fingerprint === null ||
+ typeof res.system_fingerprint === "string") &&
  typeof res?.usage === "object";

  if (!isValidOutput) {
package/src/types.ts CHANGED
@@ -7,26 +7,10 @@ export type ModelId = string;

  export interface Options {
  /**
- * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+ * (Default: true) Boolean. If a request 503s, the request will be retried with the same parameters.
  */
  retry_on_error?: boolean;
- /**
- * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
- */
- use_cache?: boolean;
- /**
- * (Default: false). Boolean. Do not load the model if it's not already available.
- */
- dont_load_model?: boolean;
- /**
- * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
- */
- use_gpu?: boolean;

- /**
- * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
- */
- wait_for_model?: boolean;
  /**
  * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
  */
@@ -47,6 +31,7 @@ export type InferenceTask = Exclude<PipelineType, "other">;
  export const INFERENCE_PROVIDERS = [
  "fal-ai",
  "fireworks-ai",
+ "nebius",
  "hf-inference",
  "replicate",
  "sambanova",