@huggingface/inference 3.1.5 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +3 -3
  2. package/dist/index.cjs +69 -153
  3. package/dist/index.js +69 -149
  4. package/dist/src/config.d.ts +1 -0
  5. package/dist/src/config.d.ts.map +1 -1
  6. package/dist/src/index.d.ts +0 -5
  7. package/dist/src/index.d.ts.map +1 -1
  8. package/dist/src/lib/getProviderModelId.d.ts +10 -0
  9. package/dist/src/lib/getProviderModelId.d.ts.map +1 -0
  10. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  11. package/dist/src/providers/consts.d.ts +10 -0
  12. package/dist/src/providers/consts.d.ts.map +1 -0
  13. package/dist/src/providers/fal-ai.d.ts +16 -4
  14. package/dist/src/providers/fal-ai.d.ts.map +1 -1
  15. package/dist/src/providers/replicate.d.ts +16 -4
  16. package/dist/src/providers/replicate.d.ts.map +1 -1
  17. package/dist/src/providers/sambanova.d.ts +16 -4
  18. package/dist/src/providers/sambanova.d.ts.map +1 -1
  19. package/dist/src/providers/together.d.ts +14 -8
  20. package/dist/src/providers/together.d.ts.map +1 -1
  21. package/dist/src/tasks/audio/textToSpeech.d.ts.map +1 -1
  22. package/dist/src/types.d.ts +3 -2
  23. package/dist/src/types.d.ts.map +1 -1
  24. package/dist/test/HfInference.spec.d.ts.map +1 -1
  25. package/package.json +2 -2
  26. package/src/config.ts +1 -0
  27. package/src/index.ts +0 -5
  28. package/src/lib/getProviderModelId.ts +74 -0
  29. package/src/lib/makeRequestOptions.ts +16 -51
  30. package/src/providers/consts.ts +15 -0
  31. package/src/providers/fal-ai.ts +16 -29
  32. package/src/providers/replicate.ts +16 -27
  33. package/src/providers/sambanova.ts +16 -21
  34. package/src/providers/together.ts +14 -55
  35. package/src/tasks/audio/textToSpeech.ts +11 -2
  36. package/src/types.ts +2 -2
  37. package/dist/src/providers/types.d.ts +0 -4
  38. package/dist/src/providers/types.d.ts.map +0 -1
  39. package/src/providers/types.ts +0 -6
@@ -0,0 +1 @@
1
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC,KAAK,UAAU,GAAG,MAAM,CAAC;AAEzB;;;;GAIG;AACH,eAAO,MAAM,0BAA0B,EAAE,MAAM,CAAC,OAAO,EAAE,UAAU,CAKlE,CAAC"}
@@ -1,6 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
1
  export declare const FAL_AI_API_BASE_URL = "https://fal.run";
3
- type FalAiId = string;
4
- export declare const FAL_AI_SUPPORTED_MODEL_IDS: ProviderMapping<FalAiId>;
5
- export {};
2
+ /**
3
+ * See the registered mapping of HF model ID => Fal model ID here:
4
+ *
5
+ * https://huggingface.co/api/partners/fal-ai/models
6
+ *
7
+ * This is a publicly available mapping.
8
+ *
9
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
10
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
11
+ *
12
+ * - If you work at Fal and want to update this mapping, please use the model mapping API we provide on huggingface.co
13
+ * - If you're a community member and want to add a new supported HF model to Fal, please open an issue on the present repo
14
+ * and we will tag Fal team members.
15
+ *
16
+ * Thanks!
17
+ */
6
18
  //# sourceMappingURL=fal-ai.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,mBAAmB,oBAAoB,CAAC;AAErD,KAAK,OAAO,GAAG,MAAM,CAAC;AAEtB,eAAO,MAAM,0BAA0B,EAAE,eAAe,CAAC,OAAO,CAwB/D,CAAC"}
1
+ {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,mBAAmB,oBAAoB,CAAC;AAErD;;;;;;;;;;;;;;;GAeG"}
@@ -1,6 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
1
  export declare const REPLICATE_API_BASE_URL = "https://api.replicate.com";
3
- type ReplicateId = string;
4
- export declare const REPLICATE_SUPPORTED_MODEL_IDS: ProviderMapping<ReplicateId>;
5
- export {};
2
+ /**
3
+ * See the registered mapping of HF model ID => Replicate model ID here:
4
+ *
5
+ * https://huggingface.co/api/partners/replicate/models
6
+ *
7
+ * This is a publicly available mapping.
8
+ *
9
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
10
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
11
+ *
12
+ * - If you work at Replicate and want to update this mapping, please use the model mapping API we provide on huggingface.co
13
+ * - If you're a community member and want to add a new supported HF model to Replicate, please open an issue on the present repo
14
+ * and we will tag Replicate team members.
15
+ *
16
+ * Thanks!
17
+ */
6
18
  //# sourceMappingURL=replicate.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"replicate.d.ts","sourceRoot":"","sources":["../../../src/providers/replicate.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,sBAAsB,8BAA8B,CAAC;AAElE,KAAK,WAAW,GAAG,MAAM,CAAC;AAE1B,eAAO,MAAM,6BAA6B,EAAE,eAAe,CAAC,WAAW,CAsBtE,CAAC"}
1
+ {"version":3,"file":"replicate.d.ts","sourceRoot":"","sources":["../../../src/providers/replicate.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,sBAAsB,8BAA8B,CAAC;AAElE;;;;;;;;;;;;;;;GAeG"}
@@ -1,6 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
1
  export declare const SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
3
- type SambanovaId = string;
4
- export declare const SAMBANOVA_SUPPORTED_MODEL_IDS: ProviderMapping<SambanovaId>;
5
- export {};
2
+ /**
3
+ * See the registered mapping of HF model ID => Sambanova model ID here:
4
+ *
5
+ * https://huggingface.co/api/partners/sambanova/models
6
+ *
7
+ * This is a publicly available mapping.
8
+ *
9
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
10
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
11
+ *
12
+ * - If you work at Sambanova and want to update this mapping, please use the model mapping API we provide on huggingface.co
13
+ * - If you're a community member and want to add a new supported HF model to Sambanova, please open an issue on the present repo
14
+ * and we will tag Sambanova team members.
15
+ *
16
+ * Thanks!
17
+ */
6
18
  //# sourceMappingURL=sambanova.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"sambanova.d.ts","sourceRoot":"","sources":["../../../src/providers/sambanova.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,sBAAsB,6BAA6B,CAAC;AAEjE,KAAK,WAAW,GAAG,MAAM,CAAC;AAE1B,eAAO,MAAM,6BAA6B,EAAE,eAAe,CAAC,WAAW,CAgBtE,CAAC"}
1
+ {"version":3,"file":"sambanova.d.ts","sourceRoot":"","sources":["../../../src/providers/sambanova.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,sBAAsB,6BAA6B,CAAC;AAEjE;;;;;;;;;;;;;;;GAeG"}
@@ -1,12 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
1
  export declare const TOGETHER_API_BASE_URL = "https://api.together.xyz";
3
2
  /**
4
- * Same comment as in sambanova.ts
3
+ * See the registered mapping of HF model ID => Together model ID here:
4
+ *
5
+ * https://huggingface.co/api/partners/together/models
6
+ *
7
+ * This is a publicly available mapping.
8
+ *
9
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
10
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
11
+ *
12
+ * - If you work at Together and want to update this mapping, please use the model mapping API we provide on huggingface.co
13
+ * - If you're a community member and want to add a new supported HF model to Together, please open an issue on the present repo
14
+ * and we will tag Together team members.
15
+ *
16
+ * Thanks!
5
17
  */
6
- type TogetherId = string;
7
- /**
8
- * https://docs.together.ai/reference/models-1
9
- */
10
- export declare const TOGETHER_SUPPORTED_MODEL_IDS: ProviderMapping<TogetherId>;
11
- export {};
12
18
  //# sourceMappingURL=together.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"together.d.ts","sourceRoot":"","sources":["../../../src/providers/together.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,SAAS,CAAC;AAE/C,eAAO,MAAM,qBAAqB,6BAA6B,CAAC;AAEhE;;GAEG;AACH,KAAK,UAAU,GAAG,MAAM,CAAC;AAEzB;;GAEG;AACH,eAAO,MAAM,4BAA4B,EAAE,eAAe,CAAC,UAAU,CA8CpE,CAAC"}
1
+ {"version":3,"file":"together.d.ts","sourceRoot":"","sources":["../../../src/providers/together.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,qBAAqB,6BAA6B,CAAC;AAEhE;;;;;;;;;;;;;;;GAeG"}
@@ -1 +1 @@
1
- {"version":3,"file":"textToSpeech.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/textToSpeech.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAE5D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,KAAK,gBAAgB,GAAG,QAAQ,GAAG,iBAAiB,CAAC;AAKrD;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAsB3F"}
1
+ {"version":3,"file":"textToSpeech.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/textToSpeech.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAE5D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,KAAK,gBAAgB,GAAG,QAAQ,GAAG,iBAAiB,CAAC;AAKrD;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CA+B3F"}
@@ -1,5 +1,4 @@
1
- import type { PipelineType } from "@huggingface/tasks";
2
- import type { ChatCompletionInput } from "@huggingface/tasks";
1
+ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
3
2
  /**
4
3
  * HF model id, like "meta-llama/Llama-3.3-70B-Instruct"
5
4
  */
@@ -78,6 +77,8 @@ export type RequestArgs = BaseArgs & ({
78
77
  inputs: unknown;
79
78
  } | {
80
79
  prompt: string;
80
+ } | {
81
+ text: string;
81
82
  } | {
82
83
  audio_url: string;
83
84
  } | ChatCompletionInput) & {
@@ -1 +1 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AACvD,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAE9D;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2EAA4E,CAAC;AAC7G,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAE5E;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,eAAO,MAAM,mBAAmB,2EAA4E,CAAC;AAC7G,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
@@ -1 +1 @@
1
- {"version":3,"file":"HfInference.spec.d.ts","sourceRoot":"","sources":["../../test/HfInference.spec.ts"],"names":[],"mappings":"AAKA,OAAO,OAAO,CAAC"}
1
+ {"version":3,"file":"HfInference.spec.d.ts","sourceRoot":"","sources":["../../test/HfInference.spec.ts"],"names":[],"mappings":"AAOA,OAAO,OAAO,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@huggingface/inference",
3
- "version": "3.1.5",
3
+ "version": "3.2.0",
4
4
  "packageManager": "pnpm@8.10.5",
5
5
  "license": "MIT",
6
6
  "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -39,7 +39,7 @@
39
39
  },
40
40
  "type": "module",
41
41
  "dependencies": {
42
- "@huggingface/tasks": "^0.15.5"
42
+ "@huggingface/tasks": "^0.15.9"
43
43
  },
44
44
  "devDependencies": {
45
45
  "@types/node": "18.13.0"
package/src/config.ts CHANGED
@@ -1 +1,2 @@
1
1
  export const HF_HUB_URL = "https://huggingface.co";
2
+ export const HF_ROUTER_URL = "https://router.huggingface.co";
package/src/index.ts CHANGED
@@ -1,9 +1,4 @@
1
- export type { ProviderMapping } from "./providers/types";
2
1
  export { HfInference, HfInferenceEndpoint } from "./HfInference";
3
2
  export { InferenceOutputError } from "./lib/InferenceOutputError";
4
- export { FAL_AI_SUPPORTED_MODEL_IDS } from "./providers/fal-ai";
5
- export { REPLICATE_SUPPORTED_MODEL_IDS } from "./providers/replicate";
6
- export { SAMBANOVA_SUPPORTED_MODEL_IDS } from "./providers/sambanova";
7
- export { TOGETHER_SUPPORTED_MODEL_IDS } from "./providers/together";
8
3
  export * from "./types";
9
4
  export * from "./tasks";
@@ -0,0 +1,74 @@
1
+ import type { WidgetType } from "@huggingface/tasks";
2
+ import type { InferenceProvider, InferenceTask, ModelId, Options, RequestArgs } from "../types";
3
+ import { HF_HUB_URL } from "../config";
4
+ import { HARDCODED_MODEL_ID_MAPPING } from "../providers/consts";
5
+
6
+ type InferenceProviderMapping = Partial<
7
+ Record<InferenceProvider, { providerId: string; status: "live" | "staging"; task: WidgetType }>
8
+ >;
9
+ const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMapping>();
10
+
11
+ export async function getProviderModelId(
12
+ params: {
13
+ model: string;
14
+ provider: InferenceProvider;
15
+ },
16
+ args: RequestArgs,
17
+ options: {
18
+ taskHint?: InferenceTask;
19
+ chatCompletion?: boolean;
20
+ fetch?: Options["fetch"];
21
+ } = {}
22
+ ): Promise<string> {
23
+ if (params.provider === "hf-inference") {
24
+ return params.model;
25
+ }
26
+ if (!options.taskHint) {
27
+ throw new Error("taskHint must be specified when using a third-party provider");
28
+ }
29
+ const task: WidgetType =
30
+ options.taskHint === "text-generation" && options.chatCompletion ? "conversational" : options.taskHint;
31
+
32
+ // A dict called HARDCODED_MODEL_ID_MAPPING takes precedence in all cases (useful for dev purposes)
33
+ if (HARDCODED_MODEL_ID_MAPPING[params.model]) {
34
+ return HARDCODED_MODEL_ID_MAPPING[params.model];
35
+ }
36
+
37
+ let inferenceProviderMapping: InferenceProviderMapping | null;
38
+ if (inferenceProviderMappingCache.has(params.model)) {
39
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
40
+ inferenceProviderMapping = inferenceProviderMappingCache.get(params.model)!;
41
+ } else {
42
+ inferenceProviderMapping = await (options?.fetch ?? fetch)(
43
+ `${HF_HUB_URL}/api/models/${params.model}?expand[]=inferenceProviderMapping`,
44
+ {
45
+ headers: args.accessToken?.startsWith("hf_") ? { Authorization: `Bearer ${args.accessToken}` } : {},
46
+ }
47
+ )
48
+ .then((resp) => resp.json())
49
+ .then((json) => json.inferenceProviderMapping)
50
+ .catch(() => null);
51
+ }
52
+
53
+ if (!inferenceProviderMapping) {
54
+ throw new Error(`We have not been able to find inference provider information for model ${params.model}.`);
55
+ }
56
+
57
+ const providerMapping = inferenceProviderMapping[params.provider];
58
+ if (providerMapping) {
59
+ if (providerMapping.task !== task) {
60
+ throw new Error(
61
+ `Model ${params.model} is not supported for task ${task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
62
+ );
63
+ }
64
+ if (providerMapping.status === "staging") {
65
+ console.warn(
66
+ `Model ${params.model} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
67
+ );
68
+ }
69
+ // TODO: how is it handled server-side if model has multiple tasks (e.g. `text-generation` + `conversational`)?
70
+ return providerMapping.providerId;
71
+ }
72
+
73
+ throw new Error(`Model ${params.model} is not supported provider ${params.provider}.`);
74
+ }
@@ -1,15 +1,15 @@
1
- import type { WidgetType } from "@huggingface/tasks";
2
- import { HF_HUB_URL } from "../config";
3
- import { FAL_AI_API_BASE_URL, FAL_AI_SUPPORTED_MODEL_IDS } from "../providers/fal-ai";
4
- import { REPLICATE_API_BASE_URL, REPLICATE_SUPPORTED_MODEL_IDS } from "../providers/replicate";
5
- import { SAMBANOVA_API_BASE_URL, SAMBANOVA_SUPPORTED_MODEL_IDS } from "../providers/sambanova";
6
- import { TOGETHER_API_BASE_URL, TOGETHER_SUPPORTED_MODEL_IDS } from "../providers/together";
1
+ import { HF_HUB_URL, HF_ROUTER_URL } from "../config";
2
+ import { FAL_AI_API_BASE_URL } from "../providers/fal-ai";
3
+ import { REPLICATE_API_BASE_URL } from "../providers/replicate";
4
+ import { SAMBANOVA_API_BASE_URL } from "../providers/sambanova";
5
+ import { TOGETHER_API_BASE_URL } from "../providers/together";
7
6
  import type { InferenceProvider } from "../types";
8
7
  import type { InferenceTask, Options, RequestArgs } from "../types";
9
8
  import { isUrl } from "./isUrl";
10
9
  import { version as packageVersion, name as packageName } from "../../package.json";
10
+ import { getProviderModelId } from "./getProviderModelId";
11
11
 
12
- const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_HUB_URL}/api/inference-proxy/{{PROVIDER}}`;
12
+ const HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
13
13
 
14
14
  /**
15
15
  * Lazy-loaded from huggingface.co/api/tasks when needed
@@ -49,18 +49,16 @@ export async function makeRequestOptions(
49
49
  if (maybeModel && isUrl(maybeModel)) {
50
50
  throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
51
51
  }
52
-
53
- let model: string;
54
- if (!maybeModel) {
55
- if (taskHint) {
56
- model = mapModel({ model: await loadDefaultModel(taskHint), provider, taskHint, chatCompletion });
57
- } else {
58
- throw new Error("No model provided, and no default model found for this task");
59
- /// TODO : change error message ^
60
- }
61
- } else {
62
- model = mapModel({ model: maybeModel, provider, taskHint, chatCompletion });
52
+ if (!maybeModel && !taskHint) {
53
+ throw new Error("No model provided, and no task has been specified.");
63
54
  }
55
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
56
+ const hfModel = maybeModel ?? (await loadDefaultModel(taskHint!));
57
+ const model = await getProviderModelId({ model: hfModel, provider }, args, {
58
+ taskHint,
59
+ chatCompletion,
60
+ fetch: options?.fetch,
61
+ });
64
62
 
65
63
  /// If accessToken is passed, it should take precedence over includeCredentials
66
64
  const authMethod = accessToken
@@ -153,39 +151,6 @@ export async function makeRequestOptions(
153
151
  return { url, info };
154
152
  }
155
153
 
156
- function mapModel(params: {
157
- model: string;
158
- provider: InferenceProvider;
159
- taskHint: InferenceTask | undefined;
160
- chatCompletion: boolean | undefined;
161
- }): string {
162
- if (params.provider === "hf-inference") {
163
- return params.model;
164
- }
165
- if (!params.taskHint) {
166
- throw new Error("taskHint must be specified when using a third-party provider");
167
- }
168
- const task: WidgetType =
169
- params.taskHint === "text-generation" && params.chatCompletion ? "conversational" : params.taskHint;
170
- const model = (() => {
171
- switch (params.provider) {
172
- case "fal-ai":
173
- return FAL_AI_SUPPORTED_MODEL_IDS[task]?.[params.model];
174
- case "replicate":
175
- return REPLICATE_SUPPORTED_MODEL_IDS[task]?.[params.model];
176
- case "sambanova":
177
- return SAMBANOVA_SUPPORTED_MODEL_IDS[task]?.[params.model];
178
- case "together":
179
- return TOGETHER_SUPPORTED_MODEL_IDS[task]?.[params.model];
180
- }
181
- })();
182
-
183
- if (!model) {
184
- throw new Error(`Model ${params.model} is not supported for task ${task} and provider ${params.provider}`);
185
- }
186
- return model;
187
- }
188
-
189
154
  function makeUrl(params: {
190
155
  authMethod: "none" | "hf-token" | "credentials-include" | "provider-key";
191
156
  chatCompletion: boolean;
@@ -0,0 +1,15 @@
1
+ import type { ModelId } from "../types";
2
+
3
+ type ProviderId = string;
4
+
5
+ /**
6
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co
7
+ * for a given Inference Provider,
8
+ * you can add it to the following dictionary, for dev purposes.
9
+ */
10
+ export const HARDCODED_MODEL_ID_MAPPING: Record<ModelId, ProviderId> = {
11
+ /**
12
+ * "HF model ID" => "Model ID on Inference Provider's side"
13
+ */
14
+ // "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
15
+ };
@@ -1,31 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
-
3
1
  export const FAL_AI_API_BASE_URL = "https://fal.run";
4
2
 
5
- type FalAiId = string;
6
-
7
- export const FAL_AI_SUPPORTED_MODEL_IDS: ProviderMapping<FalAiId> = {
8
- "text-to-image": {
9
- "black-forest-labs/FLUX.1-schnell": "fal-ai/flux/schnell",
10
- "black-forest-labs/FLUX.1-dev": "fal-ai/flux/dev",
11
- "playgroundai/playground-v2.5-1024px-aesthetic": "fal-ai/playground-v25",
12
- "ByteDance/SDXL-Lightning": "fal-ai/lightning-models",
13
- "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "fal-ai/pixart-sigma",
14
- "stabilityai/stable-diffusion-3-medium": "fal-ai/stable-diffusion-v3-medium",
15
- "Warlord-K/Sana-1024": "fal-ai/sana",
16
- "fal/AuraFlow-v0.2": "fal-ai/aura-flow",
17
- "stabilityai/stable-diffusion-3.5-large": "fal-ai/stable-diffusion-v35-large",
18
- "stabilityai/stable-diffusion-3.5-large-turbo": "fal-ai/stable-diffusion-v35-large/turbo",
19
- "stabilityai/stable-diffusion-3.5-medium": "fal-ai/stable-diffusion-v35-medium",
20
- "Kwai-Kolors/Kolors": "fal-ai/kolors",
21
- },
22
- "automatic-speech-recognition": {
23
- "openai/whisper-large-v3": "fal-ai/whisper",
24
- },
25
- "text-to-video": {
26
- "genmo/mochi-1-preview": "fal-ai/mochi-v1",
27
- "tencent/HunyuanVideo": "fal-ai/hunyuan-video",
28
- "THUDM/CogVideoX-5b": "fal-ai/cogvideox-5b",
29
- "Lightricks/LTX-Video": "fal-ai/ltx-video",
30
- },
31
- };
3
+ /**
4
+ * See the registered mapping of HF model ID => Fal model ID here:
5
+ *
6
+ * https://huggingface.co/api/partners/fal-ai/models
7
+ *
8
+ * This is a publicly available mapping.
9
+ *
10
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
11
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
12
+ *
13
+ * - If you work at Fal and want to update this mapping, please use the model mapping API we provide on huggingface.co
14
+ * - If you're a community member and want to add a new supported HF model to Fal, please open an issue on the present repo
15
+ * and we will tag Fal team members.
16
+ *
17
+ * Thanks!
18
+ */
@@ -1,29 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
-
3
1
  export const REPLICATE_API_BASE_URL = "https://api.replicate.com";
4
2
 
5
- type ReplicateId = string;
6
-
7
- export const REPLICATE_SUPPORTED_MODEL_IDS: ProviderMapping<ReplicateId> = {
8
- "text-to-image": {
9
- "black-forest-labs/FLUX.1-dev": "black-forest-labs/flux-dev",
10
- "black-forest-labs/FLUX.1-schnell": "black-forest-labs/flux-schnell",
11
- "ByteDance/Hyper-SD":
12
- "bytedance/hyper-flux-16step:382cf8959fb0f0d665b26e7e80b8d6dc3faaef1510f14ce017e8c732bb3d1eb7",
13
- "ByteDance/SDXL-Lightning":
14
- "bytedance/sdxl-lightning-4step:5599ed30703defd1d160a25a63321b4dec97101d98b4674bcc56e41f62f35637",
15
- "playgroundai/playground-v2.5-1024px-aesthetic":
16
- "playgroundai/playground-v2.5-1024px-aesthetic:a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
17
- "stabilityai/stable-diffusion-3.5-large-turbo": "stability-ai/stable-diffusion-3.5-large-turbo",
18
- "stabilityai/stable-diffusion-3.5-large": "stability-ai/stable-diffusion-3.5-large",
19
- "stabilityai/stable-diffusion-3.5-medium": "stability-ai/stable-diffusion-3.5-medium",
20
- "stabilityai/stable-diffusion-xl-base-1.0":
21
- "stability-ai/sdxl:7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc",
22
- },
23
- "text-to-speech": {
24
- "OuteAI/OuteTTS-0.3-500M": "jbilcke/oute-tts:39a59319327b27327fa3095149c5a746e7f2aee18c75055c3368237a6503cd26",
25
- },
26
- "text-to-video": {
27
- "genmo/mochi-1-preview": "genmoai/mochi-1:1944af04d098ef69bed7f9d335d102e652203f268ec4aaa2d836f6217217e460",
28
- },
29
- };
3
+ /**
4
+ * See the registered mapping of HF model ID => Replicate model ID here:
5
+ *
6
+ * https://huggingface.co/api/partners/replicate/models
7
+ *
8
+ * This is a publicly available mapping.
9
+ *
10
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
11
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
12
+ *
13
+ * - If you work at Replicate and want to update this mapping, please use the model mapping API we provide on huggingface.co
14
+ * - If you're a community member and want to add a new supported HF model to Replicate, please open an issue on the present repo
15
+ * and we will tag Replicate team members.
16
+ *
17
+ * Thanks!
18
+ */
@@ -1,23 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
-
3
1
  export const SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
4
2
 
5
- type SambanovaId = string;
6
-
7
- export const SAMBANOVA_SUPPORTED_MODEL_IDS: ProviderMapping<SambanovaId> = {
8
- /** Chat completion / conversational */
9
- conversational: {
10
- "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
11
- "Qwen/Qwen2.5-72B-Instruct": "Qwen2.5-72B-Instruct",
12
- "Qwen/QwQ-32B-Preview": "QwQ-32B-Preview",
13
- "meta-llama/Llama-3.3-70B-Instruct": "Meta-Llama-3.3-70B-Instruct",
14
- "meta-llama/Llama-3.2-1B-Instruct": "Meta-Llama-3.2-1B-Instruct",
15
- "meta-llama/Llama-3.2-3B-Instruct": "Meta-Llama-3.2-3B-Instruct",
16
- "meta-llama/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
17
- "meta-llama/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
18
- "meta-llama/Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
19
- "meta-llama/Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
20
- "meta-llama/Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
21
- "meta-llama/Llama-Guard-3-8B": "Meta-Llama-Guard-3-8B",
22
- },
23
- };
3
+ /**
4
+ * See the registered mapping of HF model ID => Sambanova model ID here:
5
+ *
6
+ * https://huggingface.co/api/partners/sambanova/models
7
+ *
8
+ * This is a publicly available mapping.
9
+ *
10
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
11
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
12
+ *
13
+ * - If you work at Sambanova and want to update this mapping, please use the model mapping API we provide on huggingface.co
14
+ * - If you're a community member and want to add a new supported HF model to Sambanova, please open an issue on the present repo
15
+ * and we will tag Sambanova team members.
16
+ *
17
+ * Thanks!
18
+ */
@@ -1,59 +1,18 @@
1
- import type { ProviderMapping } from "./types";
2
-
3
1
  export const TOGETHER_API_BASE_URL = "https://api.together.xyz";
4
2
 
5
3
  /**
6
- * Same comment as in sambanova.ts
7
- */
8
- type TogetherId = string;
9
-
10
- /**
11
- * https://docs.together.ai/reference/models-1
4
+ * See the registered mapping of HF model ID => Together model ID here:
5
+ *
6
+ * https://huggingface.co/api/partners/together/models
7
+ *
8
+ * This is a publicly available mapping.
9
+ *
10
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
11
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
12
+ *
13
+ * - If you work at Together and want to update this mapping, please use the model mapping API we provide on huggingface.co
14
+ * - If you're a community member and want to add a new supported HF model to Together, please open an issue on the present repo
15
+ * and we will tag Together team members.
16
+ *
17
+ * Thanks!
12
18
  */
13
- export const TOGETHER_SUPPORTED_MODEL_IDS: ProviderMapping<TogetherId> = {
14
- "text-to-image": {
15
- "black-forest-labs/FLUX.1-Canny-dev": "black-forest-labs/FLUX.1-canny",
16
- "black-forest-labs/FLUX.1-Depth-dev": "black-forest-labs/FLUX.1-depth",
17
- "black-forest-labs/FLUX.1-dev": "black-forest-labs/FLUX.1-dev",
18
- "black-forest-labs/FLUX.1-Redux-dev": "black-forest-labs/FLUX.1-redux",
19
- "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-pro",
20
- "stabilityai/stable-diffusion-xl-base-1.0": "stabilityai/stable-diffusion-xl-base-1.0",
21
- },
22
- conversational: {
23
- "databricks/dbrx-instruct": "databricks/dbrx-instruct",
24
- "deepseek-ai/DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
25
- "deepseek-ai/DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
26
- "deepseek-ai/deepseek-llm-67b-chat": "deepseek-ai/deepseek-llm-67b-chat",
27
- "google/gemma-2-9b-it": "google/gemma-2-9b-it",
28
- "google/gemma-2b-it": "google/gemma-2-27b-it",
29
- "meta-llama/Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
30
- "meta-llama/Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
31
- "meta-llama/Llama-3.2-11B-Vision-Instruct": "meta-llama/Llama-Vision-Free",
32
- "meta-llama/Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
33
- "meta-llama/Llama-3.2-90B-Vision-Instruct": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
34
- "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
35
- "meta-llama/Meta-Llama-3-70B-Instruct": "meta-llama/Llama-3-70b-chat-hf",
36
- "meta-llama/Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
37
- "meta-llama/Meta-Llama-3.1-405B-Instruct": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
38
- "meta-llama/Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
39
- "meta-llama/Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
40
- "microsoft/WizardLM-2-8x22B": "microsoft/WizardLM-2-8x22B",
41
- "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
42
- "mistralai/Mistral-Small-24B-Instruct-2501": "mistralai/Mistral-Small-24B-Instruct-2501",
43
- "mistralai/Mixtral-8x22B-Instruct-v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
44
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
45
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
46
- "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
47
- "Qwen/Qwen2-72B-Instruct": "Qwen/Qwen2-72B-Instruct",
48
- "Qwen/Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct-Turbo",
49
- "Qwen/Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct-Turbo",
50
- "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
51
- "Qwen/QwQ-32B-Preview": "Qwen/QwQ-32B-Preview",
52
- "scb10x/llama-3-typhoon-v1.5-8b-instruct": "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
53
- "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq": "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
54
- },
55
- "text-generation": {
56
- "meta-llama/Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
57
- "mistralai/Mixtral-8x7B-v0.1": "mistralai/Mixtral-8x7B-v0.1",
58
- },
59
- };
@@ -1,8 +1,8 @@
1
1
  import type { TextToSpeechInput } from "@huggingface/tasks";
2
2
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
3
3
  import type { BaseArgs, Options } from "../../types";
4
+ import { omit } from "../../utils/omit";
4
5
  import { request } from "../custom/request";
5
-
6
6
  type TextToSpeechArgs = BaseArgs & TextToSpeechInput;
7
7
 
8
8
  interface OutputUrlTextToSpeechGeneration {
@@ -13,7 +13,16 @@ interface OutputUrlTextToSpeechGeneration {
13
13
  * Recommended model: espnet/kan-bayashi_ljspeech_vits
14
14
  */
15
15
  export async function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<Blob> {
16
- const res = await request<Blob | OutputUrlTextToSpeechGeneration>(args, {
16
+ // Replicate models expects "text" instead of "inputs"
17
+ const payload =
18
+ args.provider === "replicate"
19
+ ? {
20
+ ...omit(args, ["inputs", "parameters"]),
21
+ ...args.parameters,
22
+ text: args.inputs,
23
+ }
24
+ : args;
25
+ const res = await request<Blob | OutputUrlTextToSpeechGeneration>(payload, {
17
26
  ...options,
18
27
  taskHint: "text-to-speech",
19
28
  });
package/src/types.ts CHANGED
@@ -1,5 +1,4 @@
1
- import type { PipelineType } from "@huggingface/tasks";
2
- import type { ChatCompletionInput } from "@huggingface/tasks";
1
+ import type { ChatCompletionInput, PipelineType } from "@huggingface/tasks";
3
2
 
4
3
  /**
5
4
  * HF model id, like "meta-llama/Llama-3.3-70B-Instruct"
@@ -88,6 +87,7 @@ export type RequestArgs = BaseArgs &
88
87
  | { data: Blob | ArrayBuffer }
89
88
  | { inputs: unknown }
90
89
  | { prompt: string }
90
+ | { text: string }
91
91
  | { audio_url: string }
92
92
  | ChatCompletionInput
93
93
  ) & {
@@ -1,4 +0,0 @@
1
- import type { WidgetType } from "@huggingface/tasks";
2
- import type { ModelId } from "../types";
3
- export type ProviderMapping<ProviderId extends string> = Partial<Record<WidgetType, Partial<Record<ModelId, ProviderId>>>>;
4
- //# sourceMappingURL=types.d.ts.map