@huggingface/inference 3.2.0 → 3.3.0

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries, and is provided for informational purposes only.
package/README.md CHANGED
@@ -46,7 +46,12 @@ Your access token should be kept private. If you need to protect it in front-end
 
 You can send inference requests to third-party providers with the inference client.
 
-Currently, we support the following providers: [Fal.ai](https://fal.ai), [Replicate](https://replicate.com), [Together](https://together.xyz) and [Sambanova](https://sambanova.ai).
+Currently, we support the following providers:
+- [Fal.ai](https://fal.ai)
+- [Fireworks AI](https://fireworks.ai)
+- [Replicate](https://replicate.com)
+- [Sambanova](https://sambanova.ai)
+- [Together](https://together.xyz)
 
 To send requests to a third-party provider, you have to pass the `provider` parameter to the inference function. Make sure your request is authenticated with an access token.
 ```ts
@@ -64,10 +69,11 @@ When authenticated with a Hugging Face access token, the request is routed throu
 When authenticated with a third-party provider key, the request is made directly against that provider's inference API.
 
 Only a subset of models are supported when requesting third-party providers. You can check the list of supported models per pipeline tasks here:
-- [Fal.ai supported models](./src/providers/fal-ai.ts)
-- [Replicate supported models](./src/providers/replicate.ts)
-- [Sambanova supported models](./src/providers/sambanova.ts)
-- [Together supported models](./src/providers/together.ts)
+- [Fal.ai supported models](https://huggingface.co/api/partners/fal-ai/models)
+- [Fireworks AI supported models](https://huggingface.co/api/partners/fireworks-ai/models)
+- [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
+- [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
+- [Together supported models](https://huggingface.co/api/partners/together/models)
 - [HF Inference API (serverless)](https://huggingface.co/models?inference=warm&sort=trending)
 
 ❗**Important note:** To be compatible, the third-party API must adhere to the "standard" shape API we expect on HF model pages for each pipeline task type.
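
As context for the change above: a minimal sketch of the call shape the README describes, using the newly added `fireworks-ai` provider (the token and model ID are placeholders; pick a model from the supported-model lists):

```ts
import { HfInference } from "@huggingface/inference";

const client = new HfInference("hf_***"); // your Hugging Face access token

// Chat completion routed through the new "fireworks-ai" provider.
const res = await client.chatCompletion({
	model: "meta-llama/Llama-3.3-70B-Instruct", // placeholder; must be in the Fireworks mapping
	provider: "fireworks-ai",
	messages: [{ role: "user", content: "What is the capital of France?" }],
});
console.log(res.choices[0].message.content);
```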
package/dist/index.cjs CHANGED
@@ -112,6 +112,9 @@ var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
 // src/providers/together.ts
 var TOGETHER_API_BASE_URL = "https://api.together.xyz";
 
+// src/providers/fireworks-ai.ts
+var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
+
 // src/lib/isUrl.ts
 function isUrl(modelOrUrl) {
   return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
@@ -119,14 +122,22 @@ function isUrl(modelOrUrl) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.2.0";
+var version = "3.3.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
   /**
    * "HF model ID" => "Model ID on Inference Provider's side"
+   *
+   * Example:
+   * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
    */
-  // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+  "fal-ai": {},
+  "fireworks-ai": {},
+  "hf-inference": {},
+  replicate: {},
+  sambanova: {},
+  together: {}
 };
 
 // src/lib/getProviderModelId.ts
@@ -139,8 +150,8 @@ async function getProviderModelId(params, args, options = {}) {
     throw new Error("taskHint must be specified when using a third-party provider");
   }
   const task = options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
-  if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
-    return HARDCODED_MODEL_ID_MAPPING[params.model];
+  if (HARDCODED_MODEL_ID_MAPPING[params.provider]?.[params.model]) {
+    return HARDCODED_MODEL_ID_MAPPING[params.provider][params.model];
   }
   let inferenceProviderMapping;
   if (inferenceProviderMappingCache.has(params.model)) {
@@ -291,6 +302,13 @@ function makeUrl(params) {
       }
       return baseUrl;
     }
+    case "fireworks-ai": {
+      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FIREWORKS_AI_API_BASE_URL;
+      if (params.taskHint === "text-generation" && params.chatCompletion) {
+        return `${baseUrl}/v1/chat/completions`;
+      }
+      return baseUrl;
+    }
     default: {
       const baseUrl = HF_HUB_INFERENCE_PROXY_TEMPLATE.replaceAll("{{PROVIDER}}", "hf-inference");
       const url = params.forceTask ? `${baseUrl}/pipeline/${params.forceTask}/${params.model}` : `${baseUrl}/models/${params.model}`;
@@ -1269,7 +1287,14 @@ var HfInferenceEndpoint = class {
 };
 
 // src/types.ts
-var INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-inference"];
+var INFERENCE_PROVIDERS = [
+  "fal-ai",
+  "fireworks-ai",
+  "hf-inference",
+  "replicate",
+  "sambanova",
+  "together"
+];
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   HfInference,
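
The `0 && (module.exports = { ... })` trailer visible at the end of this CJS bundle is dead code by construction: the `0 &&` guard short-circuits, so nothing runs at runtime, but Node's static CJS export scanner (cjs-module-lexer) still discovers the named exports, which is what makes named ESM imports of the CJS build resolve. A minimal sketch of the pattern, assuming a plain Node CommonJS module:

```ts
// sketch.cts — nothing behind the `0 &&` guard executes at runtime, yet
// cjs-module-lexer still registers "HfInference" as a named export,
// enabling: import { HfInference } from "./sketch.cjs";
class HfInference {}
module.exports.HfInference = HfInference; // the real export
0 && (module.exports = { HfInference }); // static annotation only
```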
package/dist/index.js CHANGED
@@ -57,6 +57,9 @@ var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
 // src/providers/together.ts
 var TOGETHER_API_BASE_URL = "https://api.together.xyz";
 
+// src/providers/fireworks-ai.ts
+var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
+
 // src/lib/isUrl.ts
 function isUrl(modelOrUrl) {
   return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
@@ -64,14 +67,22 @@ function isUrl(modelOrUrl) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.2.0";
+var version = "3.3.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
   /**
    * "HF model ID" => "Model ID on Inference Provider's side"
+   *
+   * Example:
+   * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
    */
-  // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+  "fal-ai": {},
+  "fireworks-ai": {},
+  "hf-inference": {},
+  replicate: {},
+  sambanova: {},
+  together: {}
 };
 
 // src/lib/getProviderModelId.ts
@@ -84,8 +95,8 @@ async function getProviderModelId(params, args, options = {}) {
     throw new Error("taskHint must be specified when using a third-party provider");
   }
   const task = options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
-  if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
-    return HARDCODED_MODEL_ID_MAPPING[params.model];
+  if (HARDCODED_MODEL_ID_MAPPING[params.provider]?.[params.model]) {
+    return HARDCODED_MODEL_ID_MAPPING[params.provider][params.model];
   }
   let inferenceProviderMapping;
   if (inferenceProviderMappingCache.has(params.model)) {
@@ -236,6 +247,13 @@ function makeUrl(params) {
       }
       return baseUrl;
     }
+    case "fireworks-ai": {
+      const baseUrl = shouldProxy ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider) : FIREWORKS_AI_API_BASE_URL;
+      if (params.taskHint === "text-generation" && params.chatCompletion) {
+        return `${baseUrl}/v1/chat/completions`;
+      }
+      return baseUrl;
+    }
     default: {
       const baseUrl = HF_HUB_INFERENCE_PROXY_TEMPLATE.replaceAll("{{PROVIDER}}", "hf-inference");
       const url = params.forceTask ? `${baseUrl}/pipeline/${params.forceTask}/${params.model}` : `${baseUrl}/models/${params.model}`;
@@ -1214,7 +1232,14 @@ var HfInferenceEndpoint = class {
 };
 
 // src/types.ts
-var INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-inference"];
+var INFERENCE_PROVIDERS = [
+  "fal-ai",
+  "fireworks-ai",
+  "hf-inference",
+  "replicate",
+  "sambanova",
+  "together"
+];
 export {
   HfInference,
   HfInferenceEndpoint,
package/dist/src/lib/makeRequestOptions.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAMA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAqH7C"}
+{"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAOA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AAapE;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,yFAAyF;IACzF,SAAS,CAAC,EAAE,MAAM,GAAG,aAAa,CAAC;IACnC,sCAAsC;IACtC,QAAQ,CAAC,EAAE,aAAa,CAAC;IACzB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAqH7C"}
package/dist/src/providers/consts.d.ts CHANGED
@@ -1,10 +1,13 @@
-import type { ModelId } from "../types";
+import type { InferenceProvider } from "../types";
+import { type ModelId } from "../types";
 type ProviderId = string;
 /**
  * If you want to try to run inference for a new model locally before it's registered on huggingface.co
  * for a given Inference Provider,
  * you can add it to the following dictionary, for dev purposes.
+ *
+ * We also inject into this dictionary from tests.
  */
-export declare const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId>;
+export declare const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelId, ProviderId>>;
 export {};
 //# sourceMappingURL=consts.d.ts.map
package/dist/src/providers/consts.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AAEzB;;;;GAIG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAKlE,CAAC"}
+{"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AACzB;;;;;;GAMG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,iBAAiB,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAAC,CAa7F,CAAC"}
package/dist/src/providers/fireworks-ai.d.ts ADDED
@@ -0,0 +1,18 @@
+export declare const FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
+/**
+ * See the registered mapping of HF model ID => Fireworks model ID here:
+ *
+ * https://huggingface.co/api/partners/fireworks/models
+ *
+ * This is a publicly available mapping.
+ *
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+ *
+ * - If you work at Fireworks and want to update this mapping, please use the model mapping API we provide on huggingface.co
+ * - If you're a community member and want to add a new supported HF model to Fireworks, please open an issue on the present repo
+ * and we will tag Fireworks team members.
+ *
+ * Thanks!
+ */
+//# sourceMappingURL=fireworks-ai.d.ts.map
package/dist/src/providers/fireworks-ai.d.ts.map ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"fireworks-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fireworks-ai.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,yBAAyB,uCAAuC,CAAC;AAE9E;;;;;;;;;;;;;;;GAeG"}
package/dist/src/types.d.ts CHANGED
@@ -38,7 +38,7 @@ export interface Options {
     includeCredentials?: string | boolean;
 }
 export type InferenceTask = Exclude<PipelineType, "other">;
-export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "replicate", "sambanova", "together", "hf-inference"];
+export declare const INFERENCE_PROVIDERS: readonly ["fal-ai", "fireworks-ai", "hf-inference", "replicate", "sambanova", "together"];
 export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
 export interface BaseArgs {
     /**
package/dist/src/types.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2EAA4E,CAAC;AAC7G,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
+{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2FAOtB,CAAC;AACX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@huggingface/inference",
-  "version": "3.2.0",
+  "version": "3.3.0",
   "packageManager": "pnpm@8.10.5",
   "license": "MIT",
   "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
package/src/lib/getProviderModelId.ts CHANGED
@@ -30,8 +30,8 @@ export async function getProviderModelId(
 		options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
 
 	// A dict called HARDCODED_MODEL_ID_MAPPING takes precedence in all cases (useful for dev purposes)
-	if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
-		return HARDCODED_MODEL_ID_MAPPING[params.model];
+	if (HARDCODED_MODEL_ID_MAPPING[params.provider]?.[params.model]) {
+		return HARDCODED_MODEL_ID_MAPPING[params.provider][params.model];
 	}
 
 	let inferenceProviderMapping: InferenceProviderMapping | null;
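
The substantive change in this file: the hardcoded mapping lookup is now scoped by provider. A standalone sketch of the new semantics (types inlined from the diff; the Fireworks-side ID is invented for illustration):

```ts
type ModelId = string;
type ProviderId = string;
type InferenceProvider = "fal-ai" | "fireworks-ai" | "hf-inference" | "replicate" | "sambanova" | "together";

const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelId, ProviderId>> = {
	"fal-ai": {},
	// Hypothetical dev override: the same HF model ID can now map to a different
	// ID per provider, which the old flat Record<ModelId, ProviderId> could not express.
	"fireworks-ai": { "Qwen/Qwen2.5-Coder-32B-Instruct": "accounts/fireworks/models/qwen2p5-coder-32b-instruct" },
	"hf-inference": {},
	replicate: {},
	sambanova: {},
	together: {},
};

// 3.2.0: HARDCODED_MODEL_ID_MAPPING[params.model] — one flat namespace.
// 3.3.0: [params.provider]?.[params.model] — provider-scoped; the optional
// chaining guards providers whose sub-dictionary holds no overrides.
function hardcodedOverride(provider: InferenceProvider, model: ModelId): ProviderId | undefined {
	return HARDCODED_MODEL_ID_MAPPING[provider]?.[model];
}

console.log(hardcodedOverride("fireworks-ai", "Qwen/Qwen2.5-Coder-32B-Instruct"));
```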
package/src/lib/makeRequestOptions.ts CHANGED
@@ -3,6 +3,7 @@ import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
 import { REPLICATE_API_BASE_URL } from "../providers/replicate";
 import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
 import { TOGETHER_API_BASE_URL } from "../providers/together";
+import { FIREWORKS_AI_API_BASE_URL } from "../providers/fireworks-ai";
 import type { InferenceProvider } from "../types";
 import type { InferenceTask, Options, RequestArgs } from "../types";
 import { isUrl } from "./isUrl";
@@ -208,6 +209,15 @@ function makeUrl(params: {
 		}
 		return baseUrl;
 	}
+	case "fireworks-ai": {
+		const baseUrl = shouldProxy
+			? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", params.provider)
+			: FIREWORKS_AI_API_BASE_URL;
+		if (params.taskHint === "text-generation" && params.chatCompletion) {
+			return `${baseUrl}/v1/chat/completions`;
+		}
+		return baseUrl;
+	}
 	default: {
 		const baseUrl = HF_HUB_INFERENCE_PROXY_TEMPLATE.replaceAll("{{PROVIDER}}", "hf-inference");
 		const url = params.forceTask
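
Extracted as a standalone sketch, the new branch resolves URLs as follows. `PROXY_TEMPLATE` stands in for the package's `HF_HUB_INFERENCE_PROXY_TEMPLATE` constant, whose actual value is defined elsewhere in the package:

```ts
const FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
const PROXY_TEMPLATE = "https://proxy.example/{{PROVIDER}}"; // placeholder value

function fireworksAiUrl(shouldProxy: boolean, taskHint?: string, chatCompletion?: boolean): string {
	// Authenticated with an HF token => proxy through the hub; with a
	// Fireworks key => hit the provider's API directly.
	const baseUrl = shouldProxy
		? PROXY_TEMPLATE.replace("{{PROVIDER}}", "fireworks-ai")
		: FIREWORKS_AI_API_BASE_URL;
	// Only conversational text-generation gets the OpenAI-compatible route.
	if (taskHint === "text-generation" && chatCompletion) {
		return `${baseUrl}/v1/chat/completions`;
	}
	return baseUrl;
}

console.log(fireworksAiUrl(false, "text-generation", true));
// -> https://api.fireworks.ai/inference/v1/chat/completions
```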
package/src/providers/consts.ts CHANGED
@@ -1,15 +1,25 @@
-import type { ModelId } from "../types";
+import type { InferenceProvider } from "../types";
+import { type ModelId } from "../types";
 
 type ProviderId = string;
-
 /**
  * If you want to try to run inference for a new model locally before it's registered on huggingface.co
  * for a given Inference Provider,
  * you can add it to the following dictionary, for dev purposes.
+ *
+ * We also inject into this dictionary from tests.
  */
-export const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId> = {
+export const HARDCODED_MODEL_ID_MAPPING: Record<InferenceProvider, Record<ModelId, ProviderId>> = {
 	/**
 	 * "HF model ID" => "Model ID on Inference Provider's side"
+	 *
+	 * Example:
+	 * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
 	 */
-	// "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+	"fal-ai": {},
+	"fireworks-ai": {},
+	"hf-inference": {},
+	replicate: {},
+	sambanova: {},
+	together: {},
 };
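
"We also inject into this dictionary from tests" works because the exported object is mutable and every provider key is pre-initialized to `{}`. A hypothetical illustration (the import path and both IDs below are invented for the sketch):

```ts
// Hypothetical test setup: point an HF model ID at a provider-side ID without
// waiting for the hub's partner mapping to be updated.
import { HARDCODED_MODEL_ID_MAPPING } from "../src/providers/consts";

HARDCODED_MODEL_ID_MAPPING["fireworks-ai"]["my-org/my-model"] = "accounts/fireworks/models/my-model";
```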
package/src/providers/fireworks-ai.ts ADDED
@@ -0,0 +1,18 @@
+export const FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
+
+/**
+ * See the registered mapping of HF model ID => Fireworks model ID here:
+ *
+ * https://huggingface.co/api/partners/fireworks/models
+ *
+ * This is a publicly available mapping.
+ *
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+ *
+ * - If you work at Fireworks and want to update this mapping, please use the model mapping API we provide on huggingface.co
+ * - If you're a community member and want to add a new supported HF model to Fireworks, please open an issue on the present repo
+ * and we will tag Fireworks team members.
+ *
+ * Thanks!
+ */
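
The doc comment points at the hub's public partner mapping, which can be inspected directly. The sketch below uses the `fireworks-ai` slug from the README (the comment above writes `fireworks`) and assumes nothing about the response shape, printing the JSON raw:

```ts
// List the registered "HF model ID => Fireworks model ID" mapping.
const res = await fetch("https://huggingface.co/api/partners/fireworks-ai/models");
if (!res.ok) {
	throw new Error(`Request failed with HTTP ${res.status}`);
}
console.log(JSON.stringify(await res.json(), null, 2));
```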
package/src/types.ts CHANGED
@@ -44,7 +44,14 @@ export interface Options {
 
 export type InferenceTask = Exclude<PipelineType, "other">;
 
-export const INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-inference"] as const;
+export const INFERENCE_PROVIDERS = [
+	"fal-ai",
+	"fireworks-ai",
+	"hf-inference",
+	"replicate",
+	"sambanova",
+	"together",
+] as const;
 export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
 
 export interface BaseArgs {
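
A note on the pattern: the `as const` tuple keeps the runtime list and the `InferenceProvider` union in lockstep via the indexed-access type, so adding `"fireworks-ai"` to the array is the whole type-level change. A small sketch (the runtime guard is illustrative, not part of the package):

```ts
const INFERENCE_PROVIDERS = [
	"fal-ai",
	"fireworks-ai",
	"hf-inference",
	"replicate",
	"sambanova",
	"together",
] as const;

// Indexed access over the readonly tuple yields the six-literal union.
type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];

// Illustrative helper built on the same list (not in the package):
function isInferenceProvider(value: string): value is InferenceProvider {
	return (INFERENCE_PROVIDERS as readonly string[]).includes(value);
}
```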