@huggingface/inference 3.1.6 → 3.2.0

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -117,7 +117,7 @@ for await (const output of hf.textGenerationStream({
  
  ### Text Generation (Chat Completion API Compatible)
  
- Using the `chatCompletion` method, you can generate text with models compatible with the OpenAI Chat Completion API. All models served by [TGI](https://api-inference.huggingface.co/framework/text-generation-inference) on Hugging Face support Messages API.
+ Using the `chatCompletion` method, you can generate text with models compatible with the OpenAI Chat Completion API. All models served by [TGI](https://huggingface.co/docs/text-generation-inference/) on Hugging Face support Messages API.
  
  [Demo](https://huggingface.co/spaces/huggingfacejs/streaming-chat-completion)
  
@@ -611,7 +611,7 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the
  
  // Chat Completion Example
  const ep = hf.endpoint(
-   "https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct"
+   "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct"
  );
  const stream = ep.chatCompletionStream({
    model: "tgi",
package/dist/index.cjs CHANGED
@@ -20,14 +20,10 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // src/index.ts
  var src_exports = {};
  __export(src_exports, {
-   FAL_AI_SUPPORTED_MODEL_IDS: () => FAL_AI_SUPPORTED_MODEL_IDS,
    HfInference: () => HfInference,
    HfInferenceEndpoint: () => HfInferenceEndpoint,
    INFERENCE_PROVIDERS: () => INFERENCE_PROVIDERS,
    InferenceOutputError: () => InferenceOutputError,
-   REPLICATE_SUPPORTED_MODEL_IDS: () => REPLICATE_SUPPORTED_MODEL_IDS,
-   SAMBANOVA_SUPPORTED_MODEL_IDS: () => SAMBANOVA_SUPPORTED_MODEL_IDS,
-   TOGETHER_SUPPORTED_MODEL_IDS: () => TOGETHER_SUPPORTED_MODEL_IDS,
    audioClassification: () => audioClassification,
    audioToAudio: () => audioToAudio,
    automaticSpeechRecognition: () => automaticSpeechRecognition,
@@ -102,128 +98,19 @@ __export(tasks_exports, {
  
  // src/config.ts
  var HF_HUB_URL = "https://huggingface.co";
+ var HF_ROUTER_URL = "https://router.huggingface.co";
  
  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";
- var FAL_AI_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-schnell": "fal-ai/flux/schnell",
-     "black-forest-labs/FLUX.1-dev": "fal-ai/flux/dev",
-     "playgroundai/playground-v2.5-1024px-aesthetic": "fal-ai/playground-v25",
-     "ByteDance/SDXL-Lightning": "fal-ai/lightning-models",
-     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "fal-ai/pixart-sigma",
-     "stabilityai/stable-diffusion-3-medium": "fal-ai/stable-diffusion-v3-medium",
-     "Warlord-K/Sana-1024": "fal-ai/sana",
-     "fal/AuraFlow-v0.2": "fal-ai/aura-flow",
-     "stabilityai/stable-diffusion-3.5-large": "fal-ai/stable-diffusion-v35-large",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "fal-ai/stable-diffusion-v35-large/turbo",
-     "stabilityai/stable-diffusion-3.5-medium": "fal-ai/stable-diffusion-v35-medium",
-     "Kwai-Kolors/Kolors": "fal-ai/kolors"
-   },
-   "automatic-speech-recognition": {
-     "openai/whisper-large-v3": "fal-ai/whisper"
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "fal-ai/mochi-v1",
-     "tencent/HunyuanVideo": "fal-ai/hunyuan-video",
-     "THUDM/CogVideoX-5b": "fal-ai/cogvideox-5b",
-     "Lightricks/LTX-Video": "fal-ai/ltx-video"
-   }
- };
  
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";
- var REPLICATE_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/flux-dev",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/flux-schnell",
-     "ByteDance/Hyper-SD": "bytedance/hyper-flux-16step:382cf8959fb0f0d665b26e7e80b8d6dc3faaef1510f14ce017e8c732bb3d1eb7",
-     "ByteDance/SDXL-Lightning": "bytedance/sdxl-lightning-4step:5599ed30703defd1d160a25a63321b4dec97101d98b4674bcc56e41f62f35637",
-     "playgroundai/playground-v2.5-1024px-aesthetic": "playgroundai/playground-v2.5-1024px-aesthetic:a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "stability-ai/stable-diffusion-3.5-large-turbo",
-     "stabilityai/stable-diffusion-3.5-large": "stability-ai/stable-diffusion-3.5-large",
-     "stabilityai/stable-diffusion-3.5-medium": "stability-ai/stable-diffusion-3.5-medium",
-     "stabilityai/stable-diffusion-xl-base-1.0": "stability-ai/sdxl:7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
-   },
-   "text-to-speech": {
-     "OuteAI/OuteTTS-0.3-500M": "jbilcke/oute-tts:3c645149db020c85d080e2f8cfe482a0e68189a922cde964fa9e80fb179191f3",
-     "hexgrad/Kokoro-82M": "jaaari/kokoro-82m:dfdf537ba482b029e0a761699e6f55e9162cfd159270bfe0e44857caa5f275a6"
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "genmoai/mochi-1:1944af04d098ef69bed7f9d335d102e652203f268ec4aaa2d836f6217217e460"
-   }
- };
  
  // src/providers/sambanova.ts
  var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
- var SAMBANOVA_SUPPORTED_MODEL_IDS = {
-   /** Chat completion / conversational */
-   conversational: {
-     "deepseek-ai/DeepSeek-Distill-R1-Llama-70B": "DeepSeek-Distill-R1-Llama-70B",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen2.5-72B-Instruct",
-     "Qwen/QwQ-32B-Preview": "QwQ-32B-Preview",
-     "meta-llama/Llama-3.3-70B-Instruct": "Meta-Llama-3.3-70B-Instruct",
-     "meta-llama/Llama-3.2-1B-Instruct": "Meta-Llama-3.2-1B-Instruct",
-     "meta-llama/Llama-3.2-3B-Instruct": "Meta-Llama-3.2-3B-Instruct",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
-     "meta-llama/Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
-     "meta-llama/Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
-     "meta-llama/Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
-     "meta-llama/Llama-Guard-3-8B": "Meta-Llama-Guard-3-8B"
-   }
- };
  
  // src/providers/together.ts
  var TOGETHER_API_BASE_URL = "https://api.together.xyz";
- var TOGETHER_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-Canny-dev": "black-forest-labs/FLUX.1-canny",
-     "black-forest-labs/FLUX.1-Depth-dev": "black-forest-labs/FLUX.1-depth",
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/FLUX.1-dev",
-     "black-forest-labs/FLUX.1-Redux-dev": "black-forest-labs/FLUX.1-redux",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-pro",
-     "stabilityai/stable-diffusion-xl-base-1.0": "stabilityai/stable-diffusion-xl-base-1.0"
-   },
-   conversational: {
-     "databricks/dbrx-instruct": "databricks/dbrx-instruct",
-     "deepseek-ai/DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
-     "deepseek-ai/DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
-     "deepseek-ai/deepseek-llm-67b-chat": "deepseek-ai/deepseek-llm-67b-chat",
-     "google/gemma-2-9b-it": "google/gemma-2-9b-it",
-     "google/gemma-2b-it": "google/gemma-2-27b-it",
-     "meta-llama/Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
-     "meta-llama/Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "meta-llama/Llama-Vision-Free",
-     "meta-llama/Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-     "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3-70B-Instruct": "meta-llama/Llama-3-70b-chat-hf",
-     "meta-llama/Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-405B-Instruct": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
-     "microsoft/WizardLM-2-8x22B": "microsoft/WizardLM-2-8x22B",
-     "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
-     "mistralai/Mistral-Small-24B-Instruct-2501": "mistralai/Mistral-Small-24B-Instruct-2501",
-     "mistralai/Mixtral-8x22B-Instruct-v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
-     "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-     "Qwen/Qwen2-72B-Instruct": "Qwen/Qwen2-72B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct-Turbo",
-     "Qwen/Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct-Turbo",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
-     "Qwen/QwQ-32B-Preview": "Qwen/QwQ-32B-Preview",
-     "scb10x/llama-3-typhoon-v1.5-8b-instruct": "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
-     "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq": "scb10x/scb10x-llama3-typhoon-v1-5x-4f316"
-   },
-   "text-generation": {
-     "meta-llama/Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
-     "mistralai/Mixtral-8x7B-v0.1": "mistralai/Mixtral-8x7B-v0.1"
-   }
- };
  
  // src/lib/isUrl.ts
  function isUrl(modelOrUrl) {
@@ -232,10 +119,62 @@ function isUrl(modelOrUrl) {
  
  // package.json
  var name = "@huggingface/inference";
- var version = "3.1.6";
+ var version = "3.2.0";
+
+ // src/providers/consts.ts
+ var HARDCODED_MODEL_ID_MAPPING = {
+   /**
+    * "HF model ID" => "Model ID on Inference Provider's side"
+    */
+   // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+ };
+
+ // src/lib/getProviderModelId.ts
+ var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
+ async function getProviderModelId(params, args, options = {}) {
+   if (params.provider === "hf-inference") {
+     return params.model;
+   }
+   if (!options.taskHint) {
+     throw new Error("taskHint must be specified when using a third-party provider");
+   }
+   const task = options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
+   if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
+     return HARDCODED_MODEL_ID_MAPPING[params.model];
+   }
+   let inferenceProviderMapping;
+   if (inferenceProviderMappingCache.has(params.model)) {
+     inferenceProviderMapping = inferenceProviderMappingCache.get(params.model);
+   } else {
+     inferenceProviderMapping = await (options?.fetch ?? fetch)(
+       `${HF_HUB_URL}/api/models/${params.model}?expand[]=inferenceProviderMapping`,
+       {
+         headers: args.accessToken?.startsWith("hf_") ? { Authorization: `Bearer ${args.accessToken}` } : {}
+       }
+     ).then((resp) => resp.json()).then((json) => json.inferenceProviderMapping).catch(() => null);
+   }
+   if (!inferenceProviderMapping) {
+     throw new Error(`We have not been able to find inference provider information for model ${params.model}.`);
+   }
+   const providerMapping = inferenceProviderMapping[params.provider];
+   if (providerMapping) {
+     if (providerMapping.task !== task) {
+       throw new Error(
+         `Model ${params.model} is not supported for task ${task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
+       );
+     }
+     if (providerMapping.status === "staging") {
+       console.warn(
+         `Model ${params.model} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
+       );
+     }
+     return providerMapping.providerId;
+   }
+   throw new Error(`Model ${params.model} is not supported provider ${params.provider}.`);
+ }
  
  // src/lib/makeRequestOptions.ts
- var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
+ var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
  var tasks = null;
  async function makeRequestOptions(args, options) {
    const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
@@ -251,16 +190,15 @@ async function makeRequestOptions(args, options) {
    if (maybeModel && isUrl(maybeModel)) {
      throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
    }
-   let model;
-   if (!maybeModel) {
-     if (taskHint) {
-       model = mapModel({ model: await loadDefaultModel(taskHint), provider, taskHint, chatCompletion: chatCompletion2 });
-     } else {
-       throw new Error("No model provided, and no default model found for this task");
-     }
-   } else {
-     model = mapModel({ model: maybeModel, provider, taskHint, chatCompletion: chatCompletion2 });
+   if (!maybeModel && !taskHint) {
+     throw new Error("No model provided, and no task has been specified.");
    }
+   const hfModel = maybeModel ?? await loadDefaultModel(taskHint);
+   const model = await getProviderModelId({ model: hfModel, provider }, args, {
+     taskHint,
+     chatCompletion: chatCompletion2,
+     fetch: options?.fetch
+   });
    const authMethod = accessToken ? accessToken.startsWith("hf_") ? "hf-token" : "provider-key" : includeCredentials === "include" ? "credentials-include" : "none";
    const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : makeUrl({
      authMethod,
@@ -316,31 +254,6 @@ async function makeRequestOptions(args, options) {
    };
    return { url, info };
  }
- function mapModel(params) {
-   if (params.provider === "hf-inference") {
-     return params.model;
-   }
-   if (!params.taskHint) {
-     throw new Error("taskHint must be specified when using a third-party provider");
-   }
-   const task = params.taskHint === "text-generation" && params.chatCompletion ? "conversational" : params.taskHint;
-   const model = (() => {
-     switch (params.provider) {
-       case "fal-ai":
-         return FAL_AI_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "replicate":
-         return REPLICATE_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "sambanova":
-         return SAMBANOVA_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "together":
-         return TOGETHER_SUPPORTED_MODEL_IDS[task]?.[params.model];
-     }
-   })();
-   if (!model) {
-     throw new Error(`Model ${params.model} is not supported for task ${task} and provider ${params.provider}`);
-   }
-   return model;
- }
  function makeUrl(params) {
    if (params.authMethod === "none" && params.provider !== "hf-inference") {
      throw new Error("Authentication is required when requesting a third-party provider. Please provide accessToken");
@@ -1359,14 +1272,10 @@ var HfInferenceEndpoint = class {
  var INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-inference"];
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
-   FAL_AI_SUPPORTED_MODEL_IDS,
    HfInference,
    HfInferenceEndpoint,
    INFERENCE_PROVIDERS,
    InferenceOutputError,
-   REPLICATE_SUPPORTED_MODEL_IDS,
-   SAMBANOVA_SUPPORTED_MODEL_IDS,
-   TOGETHER_SUPPORTED_MODEL_IDS,
    audioClassification,
    audioToAudio,
    automaticSpeechRecognition,
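
The practical effect of the `HF_HUB_INFERENCE_PROXY_TEMPLATE` change above is that proxied third-party provider calls move from `https://huggingface.co/api/inference-proxy/<provider>` to `https://router.huggingface.co/<provider>`. A tiny sketch of the template expansion (the helper function here is hypothetical; the package inlines this logic in `makeUrl`):

```ts
// Hypothetical helper mirroring the template in the diff above.
const HF_ROUTER_URL = "https://router.huggingface.co";
const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;

function proxyBaseUrl(provider: string): string {
  // "fal-ai" => "https://router.huggingface.co/fal-ai"
  return HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
}

console.log(proxyBaseUrl("together")); // https://router.huggingface.co/together
```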
package/dist/index.js CHANGED
@@ -43,128 +43,19 @@ __export(tasks_exports, {
  
  // src/config.ts
  var HF_HUB_URL = "https://huggingface.co";
+ var HF_ROUTER_URL = "https://router.huggingface.co";
  
  // src/providers/fal-ai.ts
  var FAL_AI_API_BASE_URL = "https://fal.run";
- var FAL_AI_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-schnell": "fal-ai/flux/schnell",
-     "black-forest-labs/FLUX.1-dev": "fal-ai/flux/dev",
-     "playgroundai/playground-v2.5-1024px-aesthetic": "fal-ai/playground-v25",
-     "ByteDance/SDXL-Lightning": "fal-ai/lightning-models",
-     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "fal-ai/pixart-sigma",
-     "stabilityai/stable-diffusion-3-medium": "fal-ai/stable-diffusion-v3-medium",
-     "Warlord-K/Sana-1024": "fal-ai/sana",
-     "fal/AuraFlow-v0.2": "fal-ai/aura-flow",
-     "stabilityai/stable-diffusion-3.5-large": "fal-ai/stable-diffusion-v35-large",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "fal-ai/stable-diffusion-v35-large/turbo",
-     "stabilityai/stable-diffusion-3.5-medium": "fal-ai/stable-diffusion-v35-medium",
-     "Kwai-Kolors/Kolors": "fal-ai/kolors"
-   },
-   "automatic-speech-recognition": {
-     "openai/whisper-large-v3": "fal-ai/whisper"
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "fal-ai/mochi-v1",
-     "tencent/HunyuanVideo": "fal-ai/hunyuan-video",
-     "THUDM/CogVideoX-5b": "fal-ai/cogvideox-5b",
-     "Lightricks/LTX-Video": "fal-ai/ltx-video"
-   }
- };
  
  // src/providers/replicate.ts
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";
- var REPLICATE_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/flux-dev",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/flux-schnell",
-     "ByteDance/Hyper-SD": "bytedance/hyper-flux-16step:382cf8959fb0f0d665b26e7e80b8d6dc3faaef1510f14ce017e8c732bb3d1eb7",
-     "ByteDance/SDXL-Lightning": "bytedance/sdxl-lightning-4step:5599ed30703defd1d160a25a63321b4dec97101d98b4674bcc56e41f62f35637",
-     "playgroundai/playground-v2.5-1024px-aesthetic": "playgroundai/playground-v2.5-1024px-aesthetic:a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "stability-ai/stable-diffusion-3.5-large-turbo",
-     "stabilityai/stable-diffusion-3.5-large": "stability-ai/stable-diffusion-3.5-large",
-     "stabilityai/stable-diffusion-3.5-medium": "stability-ai/stable-diffusion-3.5-medium",
-     "stabilityai/stable-diffusion-xl-base-1.0": "stability-ai/sdxl:7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
-   },
-   "text-to-speech": {
-     "OuteAI/OuteTTS-0.3-500M": "jbilcke/oute-tts:3c645149db020c85d080e2f8cfe482a0e68189a922cde964fa9e80fb179191f3",
-     "hexgrad/Kokoro-82M": "jaaari/kokoro-82m:dfdf537ba482b029e0a761699e6f55e9162cfd159270bfe0e44857caa5f275a6"
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "genmoai/mochi-1:1944af04d098ef69bed7f9d335d102e652203f268ec4aaa2d836f6217217e460"
-   }
- };
  
  // src/providers/sambanova.ts
  var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
- var SAMBANOVA_SUPPORTED_MODEL_IDS = {
-   /** Chat completion / conversational */
-   conversational: {
-     "deepseek-ai/DeepSeek-Distill-R1-Llama-70B": "DeepSeek-Distill-R1-Llama-70B",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen2.5-72B-Instruct",
-     "Qwen/QwQ-32B-Preview": "QwQ-32B-Preview",
-     "meta-llama/Llama-3.3-70B-Instruct": "Meta-Llama-3.3-70B-Instruct",
-     "meta-llama/Llama-3.2-1B-Instruct": "Meta-Llama-3.2-1B-Instruct",
-     "meta-llama/Llama-3.2-3B-Instruct": "Meta-Llama-3.2-3B-Instruct",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
-     "meta-llama/Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
-     "meta-llama/Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
-     "meta-llama/Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
-     "meta-llama/Llama-Guard-3-8B": "Meta-Llama-Guard-3-8B"
-   }
- };
  
  // src/providers/together.ts
  var TOGETHER_API_BASE_URL = "https://api.together.xyz";
- var TOGETHER_SUPPORTED_MODEL_IDS = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-Canny-dev": "black-forest-labs/FLUX.1-canny",
-     "black-forest-labs/FLUX.1-Depth-dev": "black-forest-labs/FLUX.1-depth",
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/FLUX.1-dev",
-     "black-forest-labs/FLUX.1-Redux-dev": "black-forest-labs/FLUX.1-redux",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-pro",
-     "stabilityai/stable-diffusion-xl-base-1.0": "stabilityai/stable-diffusion-xl-base-1.0"
-   },
-   conversational: {
-     "databricks/dbrx-instruct": "databricks/dbrx-instruct",
-     "deepseek-ai/DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
-     "deepseek-ai/DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
-     "deepseek-ai/deepseek-llm-67b-chat": "deepseek-ai/deepseek-llm-67b-chat",
-     "google/gemma-2-9b-it": "google/gemma-2-9b-it",
-     "google/gemma-2b-it": "google/gemma-2-27b-it",
-     "meta-llama/Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
-     "meta-llama/Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "meta-llama/Llama-Vision-Free",
-     "meta-llama/Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-     "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3-70B-Instruct": "meta-llama/Llama-3-70b-chat-hf",
-     "meta-llama/Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-405B-Instruct": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
-     "microsoft/WizardLM-2-8x22B": "microsoft/WizardLM-2-8x22B",
-     "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
-     "mistralai/Mistral-Small-24B-Instruct-2501": "mistralai/Mistral-Small-24B-Instruct-2501",
-     "mistralai/Mixtral-8x22B-Instruct-v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
-     "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-     "Qwen/Qwen2-72B-Instruct": "Qwen/Qwen2-72B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct-Turbo",
-     "Qwen/Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct-Turbo",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
-     "Qwen/QwQ-32B-Preview": "Qwen/QwQ-32B-Preview",
-     "scb10x/llama-3-typhoon-v1.5-8b-instruct": "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
-     "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq": "scb10x/scb10x-llama3-typhoon-v1-5x-4f316"
-   },
-   "text-generation": {
-     "meta-llama/Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
-     "mistralai/Mixtral-8x7B-v0.1": "mistralai/Mixtral-8x7B-v0.1"
-   }
- };
  
  // src/lib/isUrl.ts
  function isUrl(modelOrUrl) {
@@ -173,10 +64,62 @@ function isUrl(modelOrUrl) {
  
  // package.json
  var name = "@huggingface/inference";
- var version = "3.1.6";
+ var version = "3.2.0";
+
+ // src/providers/consts.ts
+ var HARDCODED_MODEL_ID_MAPPING = {
+   /**
+    * "HF model ID" => "Model ID on Inference Provider's side"
+    */
+   // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+ };
+
+ // src/lib/getProviderModelId.ts
+ var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
+ async function getProviderModelId(params, args, options = {}) {
+   if (params.provider === "hf-inference") {
+     return params.model;
+   }
+   if (!options.taskHint) {
+     throw new Error("taskHint must be specified when using a third-party provider");
+   }
+   const task = options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
+   if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
+     return HARDCODED_MODEL_ID_MAPPING[params.model];
+   }
+   let inferenceProviderMapping;
+   if (inferenceProviderMappingCache.has(params.model)) {
+     inferenceProviderMapping = inferenceProviderMappingCache.get(params.model);
+   } else {
+     inferenceProviderMapping = await (options?.fetch ?? fetch)(
+       `${HF_HUB_URL}/api/models/${params.model}?expand[]=inferenceProviderMapping`,
+       {
+         headers: args.accessToken?.startsWith("hf_") ? { Authorization: `Bearer ${args.accessToken}` } : {}
+       }
+     ).then((resp) => resp.json()).then((json) => json.inferenceProviderMapping).catch(() => null);
+   }
+   if (!inferenceProviderMapping) {
+     throw new Error(`We have not been able to find inference provider information for model ${params.model}.`);
+   }
+   const providerMapping = inferenceProviderMapping[params.provider];
+   if (providerMapping) {
+     if (providerMapping.task !== task) {
+       throw new Error(
+         `Model ${params.model} is not supported for task ${task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
+       );
+     }
+     if (providerMapping.status === "staging") {
+       console.warn(
+         `Model ${params.model} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
+       );
+     }
+     return providerMapping.providerId;
+   }
+   throw new Error(`Model ${params.model} is not supported provider ${params.provider}.`);
+ }
  
  // src/lib/makeRequestOptions.ts
- var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
+ var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
  var tasks = null;
  async function makeRequestOptions(args, options) {
    const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
@@ -192,16 +135,15 @@ async function makeRequestOptions(args, options) {
    if (maybeModel && isUrl(maybeModel)) {
      throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
    }
-   let model;
-   if (!maybeModel) {
-     if (taskHint) {
-       model = mapModel({ model: await loadDefaultModel(taskHint), provider, taskHint, chatCompletion: chatCompletion2 });
-     } else {
-       throw new Error("No model provided, and no default model found for this task");
-     }
-   } else {
-     model = mapModel({ model: maybeModel, provider, taskHint, chatCompletion: chatCompletion2 });
+   if (!maybeModel && !taskHint) {
+     throw new Error("No model provided, and no task has been specified.");
    }
+   const hfModel = maybeModel ?? await loadDefaultModel(taskHint);
+   const model = await getProviderModelId({ model: hfModel, provider }, args, {
+     taskHint,
+     chatCompletion: chatCompletion2,
+     fetch: options?.fetch
+   });
    const authMethod = accessToken ? accessToken.startsWith("hf_") ? "hf-token" : "provider-key" : includeCredentials === "include" ? "credentials-include" : "none";
    const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : makeUrl({
      authMethod,
@@ -257,31 +199,6 @@ async function makeRequestOptions(args, options) {
    };
    return { url, info };
  }
- function mapModel(params) {
-   if (params.provider === "hf-inference") {
-     return params.model;
-   }
-   if (!params.taskHint) {
-     throw new Error("taskHint must be specified when using a third-party provider");
-   }
-   const task = params.taskHint === "text-generation" && params.chatCompletion ? "conversational" : params.taskHint;
-   const model = (() => {
-     switch (params.provider) {
-       case "fal-ai":
-         return FAL_AI_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "replicate":
-         return REPLICATE_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "sambanova":
-         return SAMBANOVA_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "together":
-         return TOGETHER_SUPPORTED_MODEL_IDS[task]?.[params.model];
-     }
-   })();
-   if (!model) {
-     throw new Error(`Model ${params.model} is not supported for task ${task} and provider ${params.provider}`);
-   }
-   return model;
- }
  function makeUrl(params) {
    if (params.authMethod === "none" && params.provider !== "hf-inference") {
      throw new Error("Authentication is required when requesting a third-party provider. Please provide accessToken");
@@ -1299,14 +1216,10 @@ var HfInferenceEndpoint = class {
  // src/types.ts
  var INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-inference"];
  export {
-   FAL_AI_SUPPORTED_MODEL_IDS,
    HfInference,
    HfInferenceEndpoint,
    INFERENCE_PROVIDERS,
    InferenceOutputError,
-   REPLICATE_SUPPORTED_MODEL_IDS,
-   SAMBANOVA_SUPPORTED_MODEL_IDS,
-   TOGETHER_SUPPORTED_MODEL_IDS,
    audioClassification,
    audioToAudio,
    automaticSpeechRecognition,
package/dist/src/config.d.ts CHANGED
@@ -1,2 +1,3 @@
  export declare const HF_HUB_URL = "https://huggingface.co";
+ export declare const HF_ROUTER_URL = "https://router.huggingface.co";
  //# sourceMappingURL=config.d.ts.map
package/dist/src/config.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC"}
+ {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC;AACnD,eAAO,MAAM,aAAa,kCAAkC,CAAC"}
package/dist/src/index.d.ts CHANGED
@@ -1,10 +1,5 @@
- export type { ProviderMapping } from "./providers/types";
  export { HfInference, HfInferenceEndpoint } from "./HfInference";
  export { InferenceOutputError } from "./lib/InferenceOutputError";
- export { FAL_AI_SUPPORTED_MODEL_IDS } from "./providers/fal-ai";
- export { REPLICATE_SUPPORTED_MODEL_IDS } from "./providers/replicate";
- export { SAMBANOVA_SUPPORTED_MODEL_IDS } from "./providers/sambanova";
- export { TOGETHER_SUPPORTED_MODEL_IDS } from "./providers/together";
  export * from "./types";
  export * from "./tasks";
  //# sourceMappingURL=index.d.ts.map
package/dist/src/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,YAAY,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AACzD,OAAO,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,eAAe,CAAC;AACjE,OAAO,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AAClE,OAAO,EAAE,0BAA0B,EAAE,MAAM,oBAAoB,CAAC;AAChE,OAAO,EAAE,6BAA6B,EAAE,MAAM,uBAAuB,CAAC;AACtE,OAAO,EAAE,6BAA6B,EAAE,MAAM,uBAAuB,CAAC;AACtE,OAAO,EAAE,4BAA4B,EAAE,MAAM,sBAAsB,CAAC;AACpE,cAAc,SAAS,CAAC;AACxB,cAAc,SAAS,CAAC"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,eAAe,CAAC;AACjE,OAAO,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AAClE,cAAc,SAAS,CAAC;AACxB,cAAc,SAAS,CAAC"}
package/dist/src/lib/getProviderModelId.d.ts ADDED
@@ -0,0 +1,10 @@
+ import type { InferenceProvider, InferenceTask, Options, RequestArgs } from "../types";
+ export declare function getProviderModelId(params: {
+     model: string;
+     provider: InferenceProvider;
+ }, args: RequestArgs, options?: {
+     taskHint?: InferenceTask;
+     chatCompletion?: boolean;
+     fetch?: Options["fetch"];
+ }): Promise<string>;
+ //# sourceMappingURL=getProviderModelId.d.ts.map
package/dist/src/lib/getProviderModelId.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"getProviderModelId.d.ts","sourceRoot":"","sources":["../../../src/lib/getProviderModelId.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAW,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAShG,wBAAsB,kBAAkB,CACvC,MAAM,EAAE;IACP,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,iBAAiB,CAAC;CAC5B,EACD,IAAI,EAAE,WAAW,EACjB,OAAO,GAAE;IACR,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB,KAAK,CAAC,EAAE,OAAO,CAAC,OAAO,CAAC,CAAC;CACpB,GACJ,OAAO,CAAC,MAAM,CAAC,CAoDjB"}
package/dist/src/lib/makeRequestOptions.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAOA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAYpE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAuH7C"}
+ {"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAMA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAqH7C"}
package/dist/src/providers/consts.d.ts ADDED
@@ -0,0 +1,10 @@
+ import type { ModelId } from "../types";
+ type ProviderId = string;
+ /**
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co
+  * for a given Inference Provider,
+  * you can add it to the following dictionary, for dev purposes.
+  */
+ export declare const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId>;
+ export {};
+ //# sourceMappingURL=consts.d.ts.map
package/dist/src/providers/consts.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AAEzB;;;;GAIG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAKlE,CAAC"}
package/dist/src/providers/fal-ai.d.ts CHANGED
@@ -1,6 +1,18 @@
- import type { ProviderMapping } from "./types";
  export declare const FAL_AI_API_BASE_URL = "https://fal.run";
- type FalAiId = string;
- export declare const FAL_AI_SUPPORTED_MODEL_IDS: ProviderMapping<FalAiId>;
- export {};
+ /**
+  * See the registered mapping of HF model ID => Fal model ID here:
+  *
+  * https://huggingface.co/api/partners/fal-ai/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Fal and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Fal, please open an issue on the present repo
+  * and we will tag Fal team members.
+  *
+  * Thanks!
+  */
  //# sourceMappingURL=fal-ai.d.ts.map
package/dist/src/providers/fal-ai.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,mBAAmB,oBAAoB,CAAC;AAErD,KAAK,OAAO,GAAG,MAAM,CAAC;AAEtB,eAAO,MAAM,0BAA0B,EAAE,eAAe,CAAC,OAAO,CAwB/D,CAAC"}
+ {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,mBAAmB,oBAAoB,CAAC;AAErD;;;;;;;;;;;;;;;GAeG"}
package/dist/src/providers/replicate.d.ts CHANGED
@@ -1,6 +1,18 @@
- import type { ProviderMapping } from "./types";
  export declare const REPLICATE_API_BASE_URL = "https://api.replicate.com";
- type ReplicateId = string;
- export declare const REPLICATE_SUPPORTED_MODEL_IDS: ProviderMapping<ReplicateId>;
- export {};
+ /**
+  * See the registered mapping of HF model ID => Replicate model ID here:
+  *
+  * https://huggingface.co/api/partners/replicate/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Replicate and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Replicate, please open an issue on the present repo
+  * and we will tag Replicate team members.
+  *
+  * Thanks!
+  */
  //# sourceMappingURL=replicate.d.ts.map
package/dist/src/providers/replicate.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"replicate.d.ts","sourceRoot":"","sources":["../../../src/providers/replicate.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,sBAAsB,8BAA8B,CAAC;AAElE,KAAK,WAAW,GAAG,MAAM,CAAC;AAE1B,eAAO,MAAM,6BAA6B,EAAE,eAAe,CAAC,WAAW,CAuBtE,CAAC"}
+ {"version":3,"file":"replicate.d.ts","sourceRoot":"","sources":["../../../src/providers/replicate.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,sBAAsB,8BAA8B,CAAC;AAElE;;;;;;;;;;;;;;;GAeG"}
package/dist/src/providers/sambanova.d.ts CHANGED
@@ -1,6 +1,18 @@
- import type { ProviderMapping } from "./types";
  export declare const SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
- type SambanovaId = string;
- export declare const SAMBANOVA_SUPPORTED_MODEL_IDS: ProviderMapping<SambanovaId>;
- export {};
+ /**
+  * See the registered mapping of HF model ID => Sambanova model ID here:
+  *
+  * https://huggingface.co/api/partners/sambanova/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Sambanova and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Sambanova, please open an issue on the present repo
+  * and we will tag Sambanova team members.
+  *
+  * Thanks!
+  */
  //# sourceMappingURL=sambanova.d.ts.map
package/dist/src/providers/sambanova.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"sambanova.d.ts","sourceRoot":"","sources":["../../../src/providers/sambanova.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,sBAAsB,6BAA6B,CAAC;AAEjE,KAAK,WAAW,GAAG,MAAM,CAAC;AAE1B,eAAO,MAAM,6BAA6B,EAAE,eAAe,CAAC,WAAW,CAiBtE,CAAC"}
+ {"version":3,"file":"sambanova.d.ts","sourceRoot":"","sources":["../../../src/providers/sambanova.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,sBAAsB,6BAA6B,CAAC;AAEjE;;;;;;;;;;;;;;;GAeG"}
package/dist/src/providers/together.d.ts CHANGED
@@ -1,12 +1,18 @@
- import type { ProviderMapping } from "./types";
  export declare const TOGETHER_API_BASE_URL = "https://api.together.xyz";
  /**
-  * Same comment as in sambanova.ts
+  * See the registered mapping of HF model ID => Together model ID here:
+  *
+  * https://huggingface.co/api/partners/together/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Together and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Together, please open an issue on the present repo
+  * and we will tag Together team members.
+  *
+  * Thanks!
   */
- type TogetherId = string;
- /**
-  * https://docs.together.ai/reference/models-1
-  */
- export declare const TOGETHER_SUPPORTED_MODEL_IDS: ProviderMapping<TogetherId>;
- export {};
  //# sourceMappingURL=together.d.ts.map
package/dist/src/providers/together.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"together.d.ts","sourceRoot":"","sources":["../../../src/providers/together.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,qBAAqB,6BAA6B,CAAC;AAEhE;;GAEG;AACH,KAAK,UAAU,GAAG,MAAM,CAAC;AAEzB;;GAEG;AACH,eAAO,MAAM,4BAA4B,EAAE,eAAe,CAAC,UAAU,CA8CpE,CAAC"}
+ {"version":3,"file":"together.d.ts","sourceRoot":"","sources":["../../../src/providers/together.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,qBAAqB,6BAA6B,CAAC;AAEhE;;;;;;;;;;;;;;;GAeG"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@huggingface/inference",
-   "version": "3.1.6",
+   "version": "3.2.0",
    "packageManager": "pnpm@8.10.5",
    "license": "MIT",
    "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -39,7 +39,7 @@
    },
    "type": "module",
    "dependencies": {
-     "@huggingface/tasks": "^0.15.7"
+     "@huggingface/tasks": "^0.15.9"
    },
    "devDependencies": {
      "@types/node": "18.13.0"
package/src/config.ts CHANGED
@@ -1 +1,2 @@
  export const HF_HUB_URL = "https://huggingface.co";
+ export const HF_ROUTER_URL = "https://router.huggingface.co";
package/src/index.ts CHANGED
@@ -1,9 +1,4 @@
- export type { ProviderMapping } from "./providers/types";
  export { HfInference, HfInferenceEndpoint } from "./HfInference";
  export { InferenceOutputError } from "./lib/InferenceOutputError";
- export { FAL_AI_SUPPORTED_MODEL_IDS } from "./providers/fal-ai";
- export { REPLICATE_SUPPORTED_MODEL_IDS } from "./providers/replicate";
- export { SAMBANOVA_SUPPORTED_MODEL_IDS } from "./providers/sambanova";
- export { TOGETHER_SUPPORTED_MODEL_IDS } from "./providers/together";
  export * from "./types";
  export * from "./tasks";
package/src/lib/getProviderModelId.ts ADDED
@@ -0,0 +1,74 @@
+ import type { WidgetType } from "@huggingface/tasks";
+ import type { InferenceProvider, InferenceTask, ModelId, Options, RequestArgs } from "../types";
+ import { HF_HUB_URL } from "../config";
+ import { HARDCODED_MODEL_ID_MAPPING } from "../providers/consts";
+
+ type InferenceProviderMapping = Partial<
+   Record<InferenceProvider, { providerId: string; status: "live" | "staging"; task: WidgetType }>
+ >;
+ const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMapping>();
+
+ export async function getProviderModelId(
+   params: {
+     model: string;
+     provider: InferenceProvider;
+   },
+   args: RequestArgs,
+   options: {
+     taskHint?: InferenceTask;
+     chatCompletion?: boolean;
+     fetch?: Options["fetch"];
+   } = {}
+ ): Promise<string> {
+   if (params.provider === "hf-inference") {
+     return params.model;
+   }
+   if (!options.taskHint) {
+     throw new Error("taskHint must be specified when using a third-party provider");
+   }
+   const task: WidgetType =
+     options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
+
+   // A dict called HARDCODED_MODEL_ID_MAPPING takes precedence in all cases (useful for dev purposes)
+   if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
+     return HARDCODED_MODEL_ID_MAPPING[params.model];
+   }
+
+   let inferenceProviderMapping: InferenceProviderMapping | null;
+   if (inferenceProviderMappingCache.has(params.model)) {
+     // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+     inferenceProviderMapping = inferenceProviderMappingCache.get(params.model)!;
+   } else {
+     inferenceProviderMapping = await (options?.fetch ?? fetch)(
+       `${HF_HUB_URL}/api/models/${params.model}?expand[]=inferenceProviderMapping`,
+       {
+         headers: args.accessToken?.startsWith("hf_") ? { Authorization: `Bearer ${args.accessToken}` } : {},
+       }
+     )
+       .then((resp) => resp.json())
+       .then((json) => json.inferenceProviderMapping)
+       .catch(() => null);
+   }
+
+   if (!inferenceProviderMapping) {
+     throw new Error(`We have not been able to find inference provider information for model ${params.model}.`);
+   }
+
+   const providerMapping = inferenceProviderMapping[params.provider];
+   if (providerMapping) {
+     if (providerMapping.task !== task) {
+       throw new Error(
+         `Model ${params.model} is not supported for task ${task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
+       );
+     }
+     if (providerMapping.status === "staging") {
+       console.warn(
+         `Model ${params.model} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
+       );
+     }
+     // TODO: how is it handled server-side if model has multiple tasks (e.g. `text-generation` + `conversational`)?
+     return providerMapping.providerId;
+   }
+
+   throw new Error(`Model ${params.model} is not supported provider ${params.provider}.`);
+ }
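
The key behavioral change in 3.2.0 is visible in this new file: instead of shipping static per-provider model tables, the client asks the Hub which provider-side ID corresponds to an HF model ID, and caches the answer. The endpoint is public, so the lookup can be reproduced outside the library; a standalone sketch (the model chosen here is just an example, and the response shape depends on what providers have registered):

```ts
// Reproduce the lookup getProviderModelId performs, outside the library.
const model = "deepseek-ai/DeepSeek-R1";
const resp = await fetch(
  `https://huggingface.co/api/models/${model}?expand[]=inferenceProviderMapping`
);
const json = await resp.json();

// Example shape, per the types above:
// { together: { providerId: "...", status: "live", task: "conversational" }, ... }
console.log(json.inferenceProviderMapping);
```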
package/src/lib/makeRequestOptions.ts CHANGED
@@ -1,15 +1,15 @@
- import type { WidgetType } from "@huggingface/tasks";
- import { HF_HUB_URL } from "../config";
- import { FAL_AI_API_BASE_URL, FAL_AI_SUPPORTED_MODEL_IDS } from "../providers/fal-ai";
- import { REPLICATE_API_BASE_URL, REPLICATE_SUPPORTED_MODEL_IDS } from "../providers/replicate";
- import { SAMBANOVA_API_BASE_URL, SAMBANOVA_SUPPORTED_MODEL_IDS } from "../providers/sambanova";
- import { TOGETHER_API_BASE_URL, TOGETHER_SUPPORTED_MODEL_IDS } from "../providers/together";
+ import { HF_HUB_URL, HF_ROUTER_URL } from "../config";
+ import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
+ import { REPLICATE_API_BASE_URL } from "../providers/replicate";
+ import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
+ import { TOGETHER_API_BASE_URL } from "../providers/together";
  import type { InferenceProvider } from "../types";
  import type { InferenceTask, Options, RequestArgs } from "../types";
  import { isUrl } from "./isUrl";
  import { version as packageVersion, name as packageName } from "../../package.json";
+ import { getProviderModelId } from "./getProviderModelId";
  
- const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
+ const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
  
  /**
   * Lazy-loaded from huggingface.co/api/tasks when needed
@@ -49,18 +49,16 @@ export async function makeRequestOptions(
    if (maybeModel && isUrl(maybeModel)) {
      throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
    }
-
-   let model: string;
-   if (!maybeModel) {
-     if (taskHint) {
-       model = mapModel({ model: await loadDefaultModel(taskHint), provider, taskHint, chatCompletion });
-     } else {
-       throw new Error("No model provided, and no default model found for this task");
-       /// TODO : change error message ^
-     }
-   } else {
-     model = mapModel({ model: maybeModel, provider, taskHint, chatCompletion });
+   if (!maybeModel && !taskHint) {
+     throw new Error("No model provided, and no task has been specified.");
    }
+   // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+   const hfModel = maybeModel ?? (await loadDefaultModel(taskHint!));
+   const model = await getProviderModelId({ model: hfModel, provider }, args, {
+     taskHint,
+     chatCompletion,
+     fetch: options?.fetch,
+   });
  
    /// If accessToken is passed, it should take precedence over includeCredentials
    const authMethod = accessToken
@@ -153,39 +151,6 @@ export async function makeRequestOptions(
    return { url, info };
  }
  
- function mapModel(params: {
-   model: string;
-   provider: InferenceProvider;
-   taskHint: InferenceTask | undefined;
-   chatCompletion: boolean | undefined;
- }): string {
-   if (params.provider === "hf-inference") {
-     return params.model;
-   }
-   if (!params.taskHint) {
-     throw new Error("taskHint must be specified when using a third-party provider");
-   }
-   const task: WidgetType =
-     params.taskHint === "text-generation" && params.chatCompletion ? "conversational" : params.taskHint;
-   const model = (() => {
-     switch (params.provider) {
-       case "fal-ai":
-         return FAL_AI_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "replicate":
-         return REPLICATE_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "sambanova":
-         return SAMBANOVA_SUPPORTED_MODEL_IDS[task]?.[params.model];
-       case "together":
-         return TOGETHER_SUPPORTED_MODEL_IDS[task]?.[params.model];
-     }
-   })();
-
-   if (!model) {
-     throw new Error(`Model ${params.model} is not supported for task ${task} and provider ${params.provider}`);
-   }
-   return model;
- }
-
  function makeUrl(params: {
    authMethod: "none" | "hf-token" | "credentials-include" | "provider-key";
    chatCompletion: boolean;
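
From the caller's side, nothing changes: you still pass the HF model ID and a provider, and the resolution above happens internally. A minimal sketch, assuming a valid `hf_...` token, a model the chosen provider has registered on the Hub, and the per-call `provider` option supported in this release (values are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_...");

// The HF model ID is resolved to Together's own model ID via the Hub API
// (previously this required a static entry in TOGETHER_SUPPORTED_MODEL_IDS).
const res = await hf.chatCompletion({
  provider: "together",
  model: "deepseek-ai/DeepSeek-V3",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(res.choices[0].message.content);
```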
package/src/providers/consts.ts ADDED
@@ -0,0 +1,15 @@
+ import type { ModelId } from "../types";
+
+ type ProviderId = string;
+
+ /**
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co
+  * for a given Inference Provider,
+  * you can add it to the following dictionary, for dev purposes.
+  */
+ export const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId> = {
+   /**
+    * "HF model ID" => "Model ID on Inference Provider's side"
+    */
+   // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+ };
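
As its comments state, this dictionary is a development escape hatch: it is consulted before the Hub lookup in `getProviderModelId`, so an entry here lets you test a model that is not yet registered. A sketch of what a temporary entry would look like, echoing the commented example above:

```ts
// src/providers/consts.ts — dev-only edit, not meant to be committed.
export const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId> = {
  // Map the HF model ID to the ID the provider expects:
  "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
};
```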
package/src/providers/fal-ai.ts CHANGED
@@ -1,31 +1,18 @@
- import type { ProviderMapping } from "./types";
-
  export const FAL_AI_API_BASE_URL = "https://fal.run";
  
- type FalAiId = string;
-
- export const FAL_AI_SUPPORTED_MODEL_IDS: ProviderMapping<FalAiId> = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-schnell": "fal-ai/flux/schnell",
-     "black-forest-labs/FLUX.1-dev": "fal-ai/flux/dev",
-     "playgroundai/playground-v2.5-1024px-aesthetic": "fal-ai/playground-v25",
-     "ByteDance/SDXL-Lightning": "fal-ai/lightning-models",
-     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "fal-ai/pixart-sigma",
-     "stabilityai/stable-diffusion-3-medium": "fal-ai/stable-diffusion-v3-medium",
-     "Warlord-K/Sana-1024": "fal-ai/sana",
-     "fal/AuraFlow-v0.2": "fal-ai/aura-flow",
-     "stabilityai/stable-diffusion-3.5-large": "fal-ai/stable-diffusion-v35-large",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "fal-ai/stable-diffusion-v35-large/turbo",
-     "stabilityai/stable-diffusion-3.5-medium": "fal-ai/stable-diffusion-v35-medium",
-     "Kwai-Kolors/Kolors": "fal-ai/kolors",
-   },
-   "automatic-speech-recognition": {
-     "openai/whisper-large-v3": "fal-ai/whisper",
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "fal-ai/mochi-v1",
-     "tencent/HunyuanVideo": "fal-ai/hunyuan-video",
-     "THUDM/CogVideoX-5b": "fal-ai/cogvideox-5b",
-     "Lightricks/LTX-Video": "fal-ai/ltx-video",
-   },
- };
+ /**
+  * See the registered mapping of HF model ID => Fal model ID here:
+  *
+  * https://huggingface.co/api/partners/fal-ai/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Fal and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Fal, please open an issue on the present repo
+  * and we will tag Fal team members.
+  *
+  * Thanks!
+  */
package/src/providers/replicate.ts CHANGED
@@ -1,30 +1,18 @@
- import type { ProviderMapping } from "./types";
-
  export const REPLICATE_API_BASE_URL = "https://api.replicate.com";
  
- type ReplicateId = string;
-
- export const REPLICATE_SUPPORTED_MODEL_IDS: ProviderMapping<ReplicateId> = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/flux-dev",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/flux-schnell",
-     "ByteDance/Hyper-SD":
-       "bytedance/hyper-flux-16step:382cf8959fb0f0d665b26e7e80b8d6dc3faaef1510f14ce017e8c732bb3d1eb7",
-     "ByteDance/SDXL-Lightning":
-       "bytedance/sdxl-lightning-4step:5599ed30703defd1d160a25a63321b4dec97101d98b4674bcc56e41f62f35637",
-     "playgroundai/playground-v2.5-1024px-aesthetic":
-       "playgroundai/playground-v2.5-1024px-aesthetic:a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
-     "stabilityai/stable-diffusion-3.5-large-turbo": "stability-ai/stable-diffusion-3.5-large-turbo",
-     "stabilityai/stable-diffusion-3.5-large": "stability-ai/stable-diffusion-3.5-large",
-     "stabilityai/stable-diffusion-3.5-medium": "stability-ai/stable-diffusion-3.5-medium",
-     "stabilityai/stable-diffusion-xl-base-1.0":
-       "stability-ai/sdxl:7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc",
-   },
-   "text-to-speech": {
-     "OuteAI/OuteTTS-0.3-500M": "jbilcke/oute-tts:3c645149db020c85d080e2f8cfe482a0e68189a922cde964fa9e80fb179191f3",
-     "hexgrad/Kokoro-82M": "jaaari/kokoro-82m:dfdf537ba482b029e0a761699e6f55e9162cfd159270bfe0e44857caa5f275a6",
-   },
-   "text-to-video": {
-     "genmo/mochi-1-preview": "genmoai/mochi-1:1944af04d098ef69bed7f9d335d102e652203f268ec4aaa2d836f6217217e460",
-   },
- };
+ /**
+  * See the registered mapping of HF model ID => Replicate model ID here:
+  *
+  * https://huggingface.co/api/partners/replicate/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Replicate and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Replicate, please open an issue on the present repo
+  * and we will tag Replicate team members.
+  *
+  * Thanks!
+  */
package/src/providers/sambanova.ts CHANGED
@@ -1,24 +1,18 @@
- import type { ProviderMapping } from "./types";
-
  export const SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
  
- type SambanovaId = string;
-
- export const SAMBANOVA_SUPPORTED_MODEL_IDS: ProviderMapping<SambanovaId> = {
-   /** Chat completion / conversational */
-   conversational: {
-     "deepseek-ai/DeepSeek-Distill-R1-Llama-70B": "DeepSeek-Distill-R1-Llama-70B",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen2.5-72B-Instruct",
-     "Qwen/QwQ-32B-Preview": "QwQ-32B-Preview",
-     "meta-llama/Llama-3.3-70B-Instruct": "Meta-Llama-3.3-70B-Instruct",
-     "meta-llama/Llama-3.2-1B-Instruct": "Meta-Llama-3.2-1B-Instruct",
-     "meta-llama/Llama-3.2-3B-Instruct": "Meta-Llama-3.2-3B-Instruct",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
-     "meta-llama/Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
-     "meta-llama/Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
-     "meta-llama/Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
-     "meta-llama/Llama-Guard-3-8B": "Meta-Llama-Guard-3-8B",
-   },
- };
+ /**
+  * See the registered mapping of HF model ID => Sambanova model ID here:
+  *
+  * https://huggingface.co/api/partners/sambanova/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Sambanova and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Sambanova, please open an issue on the present repo
+  * and we will tag Sambanova team members.
+  *
+  * Thanks!
+  */
package/src/providers/together.ts CHANGED
@@ -1,59 +1,18 @@
- import type { ProviderMapping } from "./types";
-
  export const TOGETHER_API_BASE_URL = "https://api.together.xyz";
  
  /**
-  * Same comment as in sambanova.ts
-  */
- type TogetherId = string;
-
- /**
-  * https://docs.together.ai/reference/models-1
+  * See the registered mapping of HF model ID => Together model ID here:
+  *
+  * https://huggingface.co/api/partners/together/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at Together and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to Together, please open an issue on the present repo
+  * and we will tag Together team members.
+  *
+  * Thanks!
   */
- export const TOGETHER_SUPPORTED_MODEL_IDS: ProviderMapping<TogetherId> = {
-   "text-to-image": {
-     "black-forest-labs/FLUX.1-Canny-dev": "black-forest-labs/FLUX.1-canny",
-     "black-forest-labs/FLUX.1-Depth-dev": "black-forest-labs/FLUX.1-depth",
-     "black-forest-labs/FLUX.1-dev": "black-forest-labs/FLUX.1-dev",
-     "black-forest-labs/FLUX.1-Redux-dev": "black-forest-labs/FLUX.1-redux",
-     "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-pro",
-     "stabilityai/stable-diffusion-xl-base-1.0": "stabilityai/stable-diffusion-xl-base-1.0",
-   },
-   conversational: {
-     "databricks/dbrx-instruct": "databricks/dbrx-instruct",
-     "deepseek-ai/DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
-     "deepseek-ai/DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
-     "deepseek-ai/deepseek-llm-67b-chat": "deepseek-ai/deepseek-llm-67b-chat",
-     "google/gemma-2-9b-it": "google/gemma-2-9b-it",
-     "google/gemma-2b-it": "google/gemma-2-27b-it",
-     "meta-llama/Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
-     "meta-llama/Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
-     "meta-llama/Llama-3.2-11B-Vision-Instruct": "meta-llama/Llama-Vision-Free",
-     "meta-llama/Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
-     "meta-llama/Llama-3.2-90B-Vision-Instruct": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
-     "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3-70B-Instruct": "meta-llama/Llama-3-70b-chat-hf",
-     "meta-llama/Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-405B-Instruct": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-     "meta-llama/Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
-     "microsoft/WizardLM-2-8x22B": "microsoft/WizardLM-2-8x22B",
-     "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
-     "mistralai/Mistral-Small-24B-Instruct-2501": "mistralai/Mistral-Small-24B-Instruct-2501",
-     "mistralai/Mixtral-8x22B-Instruct-v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
-     "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-     "Qwen/Qwen2-72B-Instruct": "Qwen/Qwen2-72B-Instruct",
-     "Qwen/Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct-Turbo",
-     "Qwen/Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct-Turbo",
-     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
-     "Qwen/QwQ-32B-Preview": "Qwen/QwQ-32B-Preview",
-     "scb10x/llama-3-typhoon-v1.5-8b-instruct": "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
-     "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq": "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
-   },
-   "text-generation": {
-     "meta-llama/Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
-     "mistralai/Mixtral-8x7B-v0.1": "mistralai/Mixtral-8x7B-v0.1",
-   },
- };
package/dist/src/providers/types.d.ts DELETED
@@ -1,4 +0,0 @@
- import type { WidgetType } from "@huggingface/tasks";
- import type { ModelId } from "../types";
- export type ProviderMapping<ProviderId extends string> = Partial<Record<WidgetType, Partial<Record<ModelId, ProviderId>>>>;
- //# sourceMappingURL=types.d.ts.map
package/dist/src/providers/types.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/providers/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,MAAM,MAAM,eAAe,CAAC,UAAU,SAAS,MAAM,IAAI,OAAO,CAC/D,MAAM,CAAC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAAC,CAAC,CACxD,CAAC"}
package/src/providers/types.ts DELETED
@@ -1,6 +0,0 @@
- import type { WidgetType } from "@huggingface/tasks";
- import type { ModelId } from "../types";
-
- export type ProviderMapping<ProviderId extends string> = Partial<
-   Record<WidgetType, Partial<Record<ModelId, ProviderId>>>
- >;