@huggingface/inference 3.9.1 → 3.10.0

This diff shows the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.
package/README.md CHANGED
@@ -1,7 +1,7 @@
 # 🤗 Hugging Face Inference
 
- A Typescript powered wrapper for the HF Inference API (serverless), Inference Endpoints (dedicated), and all supported Inference Providers.
- It works with [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index), and even with all supported third-party Inference Providers.
+ A TypeScript-powered wrapper for Inference Providers (serverless) and Inference Endpoints (dedicated).
+ It works with [Inference Providers (serverless)](https://huggingface.co/docs/api-inference/index) – including all supported third-party Inference Providers – and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index).
 
 Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).
 
@@ -25,20 +25,20 @@ yarn add @huggingface/inference
 
 ```ts
 // esm.sh
- import { InferenceClient } from "https://esm.sh/@huggingface/inference"
+ import { InferenceClient } from "https://esm.sh/@huggingface/inference";
 // or npm:
- import { InferenceClient } from "npm:@huggingface/inference"
+ import { InferenceClient } from "npm:@huggingface/inference";
 ```
 
 ### Initialize
 
 ```typescript
- import { InferenceClient } from '@huggingface/inference'
+ import { InferenceClient } from '@huggingface/inference';
 
- const hf = new InferenceClient('your access token')
+ const hf = new InferenceClient('your access token');
 ```
 
- ❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
+ ❗**Important note:** Always pass an access token. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
 
 Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
 
@@ -54,6 +54,7 @@ Currently, we support the following providers:
 - [Nebius](https://studio.nebius.ai)
 - [Novita](https://novita.ai/?utm_source=github_huggingface&utm_medium=github_readme&utm_campaign=link)
 - [Nscale](https://nscale.com)
+ - [OVHcloud](https://endpoints.ai.cloud.ovh.net/)
 - [Replicate](https://replicate.com)
 - [Sambanova](https://sambanova.ai)
 - [Together](https://together.xyz)
@@ -84,6 +85,7 @@ Only a subset of models are supported when requesting third-party providers. You
 - [Hyperbolic supported models](https://huggingface.co/api/partners/hyperbolic/models)
 - [Nebius supported models](https://huggingface.co/api/partners/nebius/models)
 - [Nscale supported models](https://huggingface.co/api/partners/nscale/models)
+ - [OVHcloud supported models](https://huggingface.co/api/partners/ovhcloud/models)
 - [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
 - [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
 - [Together supported models](https://huggingface.co/api/partners/together/models)
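For readers skimming this diff: the new provider slots into the existing client API via the `provider` option. A minimal usage sketch (the model id below is a placeholder, not something this diff pins down):

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // your access token

// Route a chat request through the newly added OVHcloud provider.
const out = await client.chatCompletion({
  provider: "ovhcloud",
  model: "meta-llama/Llama-3.1-8B-Instruct", // placeholder model id
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(out.choices[0].message);
```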
package/dist/index.cjs CHANGED
@@ -342,15 +342,7 @@ var FalAITextToImageTask = class extends FalAITask {
 ...omit(params.args, ["inputs", "parameters"]),
 ...params.args.parameters,
 sync_mode: true,
- prompt: params.args.inputs,
- ...params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath ? {
- loras: [
- {
- path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
- scale: 1
- }
- ]
- } : void 0
+ prompt: params.args.inputs
 };
 if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
 payload.loras = [
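The hunk above removes a duplicated inline spread: the LoRA entry used to be built both inside the payload literal and again after it. In 3.10.0 the payload is built once and `loras` is attached in a single conditional step. A simplified sketch of the resulting shape (`buildLoraPath` and the types here are stand-ins for the bundle's internals, not its exact code):

```ts
// Stand-ins for internal helpers/types (assumptions, for illustration only).
interface LoraMapping { adapter?: string; adapterWeightsPath?: string; hfModelId: string }
const buildLoraPath = (modelId: string, weightsPath: string): string =>
  `${modelId}/${weightsPath}`; // the real helper resolves a full weights URL

function buildFalPayload(inputs: string, mapping?: LoraMapping): Record<string, unknown> {
  // Base payload first...
  const payload: Record<string, unknown> = { sync_mode: true, prompt: inputs };
  // ...then the LoRA weights are attached exactly once.
  if (mapping?.adapter === "lora" && mapping.adapterWeightsPath) {
    payload.loras = [{ path: buildLoraPath(mapping.hfModelId, mapping.adapterWeightsPath), scale: 1 }];
  }
  return payload;
}
```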
@@ -1033,6 +1025,39 @@ var OpenAIConversationalTask = class extends BaseConversationalTask {
 }
 };
 
+ // src/providers/ovhcloud.ts
+ var OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
+ var OvhCloudConversationalTask = class extends BaseConversationalTask {
+ constructor() {
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
+ }
+ };
+ var OvhCloudTextGenerationTask = class extends BaseTextGenerationTask {
+ constructor() {
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
+ }
+ preparePayload(params) {
+ return {
+ model: params.model,
+ ...omit(params.args, ["inputs", "parameters"]),
+ ...params.args.parameters ? {
+ max_tokens: params.args.parameters.max_new_tokens,
+ ...omit(params.args.parameters, "max_new_tokens")
+ } : void 0,
+ prompt: params.args.inputs
+ };
+ }
+ async getResponse(response) {
+ if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
+ const completion = response.choices[0];
+ return {
+ generated_text: completion.text
+ };
+ }
+ throw new InferenceOutputError("Expected OVHcloud text generation response format");
+ }
+ };
+
 // src/providers/replicate.ts
 var ReplicateTask = class extends TaskProviderHelper {
 constructor(url) {
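One detail worth calling out in `preparePayload` above: OVHcloud serves an OpenAI-style completion API, so the HF-style `max_new_tokens` parameter is renamed to `max_tokens` while every other parameter passes through. A standalone sketch of just that remapping:

```ts
// Mirrors the parameter handling in OvhCloudTextGenerationTask.preparePayload:
// max_new_tokens is renamed to max_tokens; all other parameters pass through.
function remapParameters(parameters: Record<string, unknown>): Record<string, unknown> {
  const { max_new_tokens, ...rest } = parameters;
  return { max_tokens: max_new_tokens, ...rest };
}

// remapParameters({ max_new_tokens: 64, temperature: 0.7 })
// => { max_tokens: 64, temperature: 0.7 }
```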
@@ -1285,6 +1310,10 @@ var PROVIDERS = {
 openai: {
 conversational: new OpenAIConversationalTask()
 },
+ ovhcloud: {
+ conversational: new OvhCloudConversationalTask(),
+ "text-generation": new OvhCloudTextGenerationTask()
+ },
 replicate: {
 "text-to-image": new ReplicateTextToImageTask(),
 "text-to-speech": new ReplicateTextToSpeechTask(),
@@ -1323,7 +1352,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
- var version = "3.9.1";
+ var version = "3.10.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_INFERENCE_MAPPING = {
@@ -1346,6 +1375,7 @@ var HARDCODED_MODEL_INFERENCE_MAPPING = {
 novita: {},
 nscale: {},
 openai: {},
+ ovhcloud: {},
 replicate: {},
 sambanova: {},
 together: {}
@@ -1388,22 +1418,6 @@ async function getInferenceProviderMapping(params, options) {
 `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
 );
 }
- if (providerMapping.adapter === "lora") {
- const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
- if (!treeResp.ok) {
- throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
- }
- const tree = await treeResp.json();
- const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
- if (!adapterWeightsPath) {
- throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
- }
- return {
- ...providerMapping,
- hfModelId: params.modelId,
- adapterWeightsPath
- };
- }
 return { ...providerMapping, hfModelId: params.modelId };
 }
 return null;
@@ -2272,6 +2286,7 @@ var INFERENCE_PROVIDERS = [
 "novita",
 "nscale",
 "openai",
+ "ovhcloud",
 "replicate",
 "sambanova",
 "together"
@@ -2296,6 +2311,7 @@ var templates = {
  "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
  "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
  },
  "huggingface.js": {
@@ -2317,11 +2333,23 @@ const image = await client.textToImage({
 billTo: "{{ billTo }}",
 }{% endif %});
 /// Use the generated image (it's a Blob)`,
+ "textToSpeech": `import { InferenceClient } from "@huggingface/inference";
+
+ const client = new InferenceClient("{{ accessToken }}");
+
+ const audio = await client.textToSpeech({
+ provider: "{{ provider }}",
+ model: "{{ model.id }}",
+ inputs: {{ inputs.asObj.inputs }},
+ }{% if billTo %}, {
+ billTo: "{{ billTo }}",
+ }{% endif %});
+ // Use the generated audio (it's a Blob)`,
 "textToVideo": `import { InferenceClient } from "@huggingface/inference";
 
 const client = new InferenceClient("{{ accessToken }}");
 
- const image = await client.textToVideo({
+ const video = await client.textToVideo({
 provider: "{{ provider }}",
 model: "{{ model.id }}",
 inputs: {{ inputs.asObj.inputs }},
@@ -2337,7 +2365,7 @@ const image = await client.textToVideo({
  },
  "python": {
  "fal_client": {
- "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
  },
  "huggingface_hub": {
  "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
@@ -2349,6 +2377,7 @@ const image = await client.textToVideo({
  "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
  "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
  "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
+ "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
  "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
  },
  "openai": {
@@ -2365,8 +2394,9 @@ const image = await client.textToVideo({
  "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
  "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {\n "Authorization": "{{ authorizationHeader }}",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}"\n{% endif %}\n}',
  "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
- "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
  "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
  }
@@ -2469,6 +2499,7 @@ var HF_JS_METHODS = {
 "text-generation": "textGeneration",
 "text2text-generation": "textGeneration",
 "token-classification": "tokenClassification",
+ "text-to-speech": "textToSpeech",
 translation: "translation"
 };
 var snippetGenerator = (templateName, inputPreparationFn) => {
@@ -2615,7 +2646,7 @@ var snippets = {
 "text-generation": snippetGenerator("basic"),
 "text-to-audio": snippetGenerator("textToAudio"),
 "text-to-image": snippetGenerator("textToImage"),
- "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-speech": snippetGenerator("textToSpeech"),
 "text-to-video": snippetGenerator("textToVideo"),
 "text2text-generation": snippetGenerator("basic"),
 "token-classification": snippetGenerator("basic"),
package/dist/index.js CHANGED
@@ -285,15 +285,7 @@ var FalAITextToImageTask = class extends FalAITask {
 ...omit(params.args, ["inputs", "parameters"]),
 ...params.args.parameters,
 sync_mode: true,
- prompt: params.args.inputs,
- ...params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath ? {
- loras: [
- {
- path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
- scale: 1
- }
- ]
- } : void 0
+ prompt: params.args.inputs
 };
 if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
 payload.loras = [
@@ -976,6 +968,39 @@ var OpenAIConversationalTask = class extends BaseConversationalTask {
 }
 };
 
+ // src/providers/ovhcloud.ts
+ var OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
+ var OvhCloudConversationalTask = class extends BaseConversationalTask {
+ constructor() {
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
+ }
+ };
+ var OvhCloudTextGenerationTask = class extends BaseTextGenerationTask {
+ constructor() {
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
+ }
+ preparePayload(params) {
+ return {
+ model: params.model,
+ ...omit(params.args, ["inputs", "parameters"]),
+ ...params.args.parameters ? {
+ max_tokens: params.args.parameters.max_new_tokens,
+ ...omit(params.args.parameters, "max_new_tokens")
+ } : void 0,
+ prompt: params.args.inputs
+ };
+ }
+ async getResponse(response) {
+ if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
+ const completion = response.choices[0];
+ return {
+ generated_text: completion.text
+ };
+ }
+ throw new InferenceOutputError("Expected OVHcloud text generation response format");
+ }
+ };
+
 // src/providers/replicate.ts
 var ReplicateTask = class extends TaskProviderHelper {
 constructor(url) {
@@ -1228,6 +1253,10 @@ var PROVIDERS = {
 openai: {
 conversational: new OpenAIConversationalTask()
 },
+ ovhcloud: {
+ conversational: new OvhCloudConversationalTask(),
+ "text-generation": new OvhCloudTextGenerationTask()
+ },
 replicate: {
 "text-to-image": new ReplicateTextToImageTask(),
 "text-to-speech": new ReplicateTextToSpeechTask(),
@@ -1266,7 +1295,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
- var version = "3.9.1";
+ var version = "3.10.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_INFERENCE_MAPPING = {
@@ -1289,6 +1318,7 @@ var HARDCODED_MODEL_INFERENCE_MAPPING = {
 novita: {},
 nscale: {},
 openai: {},
+ ovhcloud: {},
 replicate: {},
 sambanova: {},
 together: {}
@@ -1331,22 +1361,6 @@ async function getInferenceProviderMapping(params, options) {
 `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
 );
 }
- if (providerMapping.adapter === "lora") {
- const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
- if (!treeResp.ok) {
- throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
- }
- const tree = await treeResp.json();
- const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
- if (!adapterWeightsPath) {
- throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
- }
- return {
- ...providerMapping,
- hfModelId: params.modelId,
- adapterWeightsPath
- };
- }
 return { ...providerMapping, hfModelId: params.modelId };
 }
 return null;
@@ -2215,6 +2229,7 @@ var INFERENCE_PROVIDERS = [
 "novita",
 "nscale",
 "openai",
+ "ovhcloud",
 "replicate",
 "sambanova",
 "together"
@@ -2242,6 +2257,7 @@ var templates = {
  "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
  "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
  },
  "huggingface.js": {
@@ -2263,11 +2279,23 @@ const image = await client.textToImage({
 billTo: "{{ billTo }}",
 }{% endif %});
 /// Use the generated image (it's a Blob)`,
+ "textToSpeech": `import { InferenceClient } from "@huggingface/inference";
+
+ const client = new InferenceClient("{{ accessToken }}");
+
+ const audio = await client.textToSpeech({
+ provider: "{{ provider }}",
+ model: "{{ model.id }}",
+ inputs: {{ inputs.asObj.inputs }},
+ }{% if billTo %}, {
+ billTo: "{{ billTo }}",
+ }{% endif %});
+ // Use the generated audio (it's a Blob)`,
 "textToVideo": `import { InferenceClient } from "@huggingface/inference";
 
 const client = new InferenceClient("{{ accessToken }}");
 
- const image = await client.textToVideo({
+ const video = await client.textToVideo({
 provider: "{{ provider }}",
 model: "{{ model.id }}",
 inputs: {{ inputs.asObj.inputs }},
@@ -2283,7 +2311,7 @@ const image = await client.textToVideo({
  },
  "python": {
  "fal_client": {
- "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
  },
  "huggingface_hub": {
  "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
@@ -2295,6 +2323,7 @@ const image = await client.textToVideo({
  "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
  "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
  "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
+ "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
  "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
  },
  "openai": {
@@ -2311,8 +2340,9 @@ const image = await client.textToVideo({
  "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
  "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {\n "Authorization": "{{ authorizationHeader }}",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}"\n{% endif %}\n}',
  "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
- "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
  "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
  }
@@ -2415,6 +2445,7 @@ var HF_JS_METHODS = {
 "text-generation": "textGeneration",
 "text2text-generation": "textGeneration",
 "token-classification": "tokenClassification",
+ "text-to-speech": "textToSpeech",
 translation: "translation"
 };
 var snippetGenerator = (templateName, inputPreparationFn) => {
@@ -2561,7 +2592,7 @@ var snippets = {
 "text-generation": snippetGenerator("basic"),
 "text-to-audio": snippetGenerator("textToAudio"),
 "text-to-image": snippetGenerator("textToImage"),
- "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-speech": snippetGenerator("textToSpeech"),
 "text-to-video": snippetGenerator("textToVideo"),
 "text2text-generation": snippetGenerator("basic"),
 "token-classification": snippetGenerator("basic"),
@@ -1,7 +1,7 @@
 import type { WidgetType } from "@huggingface/tasks";
 import type { InferenceProvider, ModelId } from "../types";
- export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId" | "adapterWeightsPath">>>>;
- export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId" | "adapterWeightsPath">>>;
+ export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "ovhcloud" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId">>>>;
+ export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>>;
 export interface InferenceProviderModelMapping {
 adapter?: string;
 adapterWeightsPath?: string;
@@ -1 +1 @@
- {"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,iUAA+C,CAAC;AAE1F,MAAM,MAAM,wBAAwB,GAAG,OAAO,CAC7C,MAAM,CAAC,iBAAiB,EAAE,IAAI,CAAC,6BAA6B,EAAE,WAAW,GAAG,oBAAoB,CAAC,CAAC,CAClG,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,SAAS,EAAE,OAAO,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,GAAG,SAAS,CAAC;IAC3B,IAAI,EAAE,UAAU,CAAC;CACjB;AAED,wBAAsB,2BAA2B,CAChD,MAAM,EAAE;IACP,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,iBAAiB,CAAC;IAC5B,IAAI,EAAE,UAAU,CAAC;CACjB,EACD,OAAO,EAAE;IACR,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,WAAW,EAAE,IAAI,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;CACtE,GACC,OAAO,CAAC,6BAA6B,GAAG,IAAI,CAAC,CA+D/C"}
+ {"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,uTAA+C,CAAC;AAE1F,MAAM,MAAM,wBAAwB,GAAG,OAAO,CAC7C,MAAM,CAAC,iBAAiB,EAAE,IAAI,CAAC,6BAA6B,EAAE,WAAW,CAAC,CAAC,CAC3E,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,SAAS,EAAE,OAAO,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,GAAG,SAAS,CAAC;IAC3B,IAAI,EAAE,UAAU,CAAC;CACjB;AAED,wBAAsB,2BAA2B,CAChD,MAAM,EAAE;IACP,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,iBAAiB,CAAC;IAC5B,IAAI,EAAE,UAAU,CAAC;CACjB,EACD,OAAO,EAAE;IACR,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,WAAW,EAAE,IAAI,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;CACtE,GACC,OAAO,CAAC,6BAA6B,GAAG,IAAI,CAAC,CA+C/C"}
@@ -1 +1 @@
- {"version":3,"file":"getProviderHelper.d.ts","sourceRoot":"","sources":["../../../src/lib/getProviderHelper.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EACX,6BAA6B,EAC7B,sBAAsB,EACtB,oCAAoC,EACpC,wBAAwB,EACxB,mCAAmC,EACnC,2BAA2B,EAC3B,kBAAkB,EAClB,6BAA6B,EAC7B,2BAA2B,EAC3B,sBAAsB,EACtB,qBAAqB,EACrB,yBAAyB,EACzB,2BAA2B,EAC3B,4BAA4B,EAC5B,uBAAuB,EACvB,gCAAgC,EAChC,+BAA+B,EAC/B,2BAA2B,EAC3B,kBAAkB,EAClB,4BAA4B,EAC5B,wBAAwB,EACxB,qBAAqB,EACrB,qBAAqB,EACrB,sBAAsB,EACtB,qBAAqB,EACrB,6BAA6B,EAC7B,qBAAqB,EACrB,iCAAiC,EACjC,gCAAgC,EAChC,qCAAqC,EACrC,MAAM,6BAA6B,CAAC;AAIrC,OAAO,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAEjE,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,iBAAiB,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,kBAAkB,CAAC,CAAC,CA4FnG,CAAC;AAEF;;GAEG;AACH,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,iBAAiB,GACrB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,8BAA8B,GAClC,oCAAoC,GAAG,kBAAkB,CAAC;AAC7D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,WAAW,GACf,kBAAkB,GAAG,kBAAkB,CAAC;AAC3C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,6BAA6B,GACjC,mCAAmC,GAAG,kBAAkB,CAAC;AAC5D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,kBAAkB,GACtB,yBAAyB,GAAG,kBAAkB,CAAC;AAClD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gCAAgC,GACpC,qCAAqC,GAAG,kBAAkB,CAAC;AAC9D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,wBAAwB,GAC5B,+BAA+B,GAAG,kBAAkB,CAAC;AACxD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,aAAa,GACjB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,uBAAuB,GAAG,kBAAkB,CAAC;AAChD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,2BAA2B,GAC/B,iCAAiC,GAAG,kBAAkB,CAAC;AAC1D,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,iBAAiB,EAAE
,IAAI,EAAE,aAAa,GAAG,SAAS,GAAG,kBAAkB,CAAC"}
+ {"version":3,"file":"getProviderHelper.d.ts","sourceRoot":"","sources":["../../../src/lib/getProviderHelper.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EACX,6BAA6B,EAC7B,sBAAsB,EACtB,oCAAoC,EACpC,wBAAwB,EACxB,mCAAmC,EACnC,2BAA2B,EAC3B,kBAAkB,EAClB,6BAA6B,EAC7B,2BAA2B,EAC3B,sBAAsB,EACtB,qBAAqB,EACrB,yBAAyB,EACzB,2BAA2B,EAC3B,4BAA4B,EAC5B,uBAAuB,EACvB,gCAAgC,EAChC,+BAA+B,EAC/B,2BAA2B,EAC3B,kBAAkB,EAClB,4BAA4B,EAC5B,wBAAwB,EACxB,qBAAqB,EACrB,qBAAqB,EACrB,sBAAsB,EACtB,qBAAqB,EACrB,6BAA6B,EAC7B,qBAAqB,EACrB,iCAAiC,EACjC,gCAAgC,EAChC,qCAAqC,EACrC,MAAM,6BAA6B,CAAC;AAIrC,OAAO,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAEjE,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,iBAAiB,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,kBAAkB,CAAC,CAAC,CAgGnG,CAAC;AAEF;;GAEG;AACH,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,iBAAiB,GACrB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,8BAA8B,GAClC,oCAAoC,GAAG,kBAAkB,CAAC;AAC7D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,WAAW,GACf,kBAAkB,GAAG,kBAAkB,CAAC;AAC3C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,6BAA6B,GACjC,mCAAmC,GAAG,kBAAkB,CAAC;AAC5D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,kBAAkB,GACtB,yBAAyB,GAAG,kBAAkB,CAAC;AAClD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gCAAgC,GACpC,qCAAqC,GAAG,kBAAkB,CAAC;AAC9D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,wBAAwB,GAC5B,+BAA+B,GAAG,kBAAkB,CAAC;AACxD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,aAAa,GACjB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,uBAAuB,GAAG,kBAAkB,CAAC;AAChD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,2BAA2B,GAC/B,iCAAiC,GAAG,kBAAkB,CAAC;AAC1D,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,iBAAiB,EAAE
,IAAI,EAAE,aAAa,GAAG,SAAS,GAAG,kBAAkB,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AACxF,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC;;;;;;GAMG;AACH,eAAO,MAAM,iCAAiC,EAAE,MAAM,CACrD,iBAAiB,EACjB,MAAM,CAAC,OAAO,EAAE,6BAA6B,CAAC,CAwB9C,CAAC"}
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AACxF,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC;;;;;;GAMG;AACH,eAAO,MAAM,iCAAiC,EAAE,MAAM,CACrD,iBAAiB,EACjB,MAAM,CAAC,OAAO,EAAE,6BAA6B,CAAC,CAyB9C,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AACH,OAAO,KAAK,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAG3E,OAAO,KAAK,EAAE,UAAU,EAAE,YAAY,EAAW,SAAS,EAAE,MAAM,UAAU,CAAC;AAG7E,OAAO,EACN,KAAK,oCAAoC,EACzC,kBAAkB,EAClB,KAAK,qBAAqB,EAC1B,KAAK,qBAAqB,EAC1B,MAAM,kBAAkB,CAAC;AAG1B,MAAM,WAAW,gBAAgB;IAChC,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,EAAE,MAAM,CAAC;CACrB;AAED,UAAU,sBAAsB;IAC/B,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;KACZ,CAAC,CAAC;CACH;AAYD,eAAO,MAAM,2BAA2B,UAA0D,CAAC;AAEnG,uBAAe,SAAU,SAAQ,kBAAkB;gBACtC,GAAG,CAAC,EAAE,MAAM;IAIxB,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAG3D,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAG3B,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;CAUtF;AAMD,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;IAC1E,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAiCrD,WAAW,CAAC,QAAQ,EAAE,sBAAsB,EAAE,UAAU,CAAC,EAAE,KAAK,GAAG,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;CAkBjH;AAED,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;;IAI1E,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAMpC,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CACzB,QAAQ,EAAE,gBAAgB,EAC1B,GAAG,CAAC,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAC9B,OAAO,CAAC,IAAI,CAAC;CA8DhB;AAED,qBAAa,mCAAoC,SAAQ,SAAU,YAAW,oCAAoC;IACxG,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;IAKvE,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,gCAAgC,CAAC;CASxF;AAED,qBAAa,qBAAsB,SAAQ,SAAS;IAC1C,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;CAqB5D"}
+ {"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AACH,OAAO,KAAK,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAG3E,OAAO,KAAK,EAAE,UAAU,EAAE,YAAY,EAAW,SAAS,EAAE,MAAM,UAAU,CAAC;AAG7E,OAAO,EACN,KAAK,oCAAoC,EACzC,kBAAkB,EAClB,KAAK,qBAAqB,EAC1B,KAAK,qBAAqB,EAC1B,MAAM,kBAAkB,CAAC;AAG1B,MAAM,WAAW,gBAAgB;IAChC,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,EAAE,MAAM,CAAC;CACrB;AAED,UAAU,sBAAsB;IAC/B,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;KACZ,CAAC,CAAC;CACH;AAYD,eAAO,MAAM,2BAA2B,UAA0D,CAAC;AAEnG,uBAAe,SAAU,SAAQ,kBAAkB;gBACtC,GAAG,CAAC,EAAE,MAAM;IAIxB,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAG3D,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAG3B,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;CAUtF;AAMD,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;IAC1E,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAuBrD,WAAW,CAAC,QAAQ,EAAE,sBAAsB,EAAE,UAAU,CAAC,EAAE,KAAK,GAAG,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;CAkBjH;AAED,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;;IAI1E,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAMpC,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CACzB,QAAQ,EAAE,gBAAgB,EAC1B,GAAG,CAAC,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAC9B,OAAO,CAAC,IAAI,CAAC;CA8DhB;AAED,qBAAa,mCAAoC,SAAQ,SAAU,YAAW,oCAAoC;IACxG,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;IAKvE,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,gCAAgC,CAAC;CASxF;AAED,qBAAa,qBAAsB,SAAQ,SAAS;IAC1C,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;CAqB5D"}
@@ -0,0 +1,38 @@
+ /**
+ * See the registered mapping of HF model ID => OVHcloud model ID here:
+ *
+ * https://huggingface.co/api/partners/ovhcloud/models
+ *
+ * This is a publicly available mapping.
+ *
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+ *
+ * - If you work at OVHcloud and want to update this mapping, please use the model mapping API we provide on huggingface.co
+ * - If you're a community member and want to add a new supported HF model to OVHcloud, please open an issue on the present repo
+ * and we will tag OVHcloud team members.
+ *
+ * Thanks!
+ */
+ import { BaseConversationalTask, BaseTextGenerationTask } from "./providerHelper";
+ import type { ChatCompletionOutput, TextGenerationOutput, TextGenerationOutputFinishReason } from "@huggingface/tasks";
+ import type { BodyParams } from "../types";
+ import type { TextGenerationInput } from "@huggingface/tasks";
+ interface OvhCloudTextCompletionOutput extends Omit<ChatCompletionOutput, "choices"> {
+ choices: Array<{
+ text: string;
+ finish_reason: TextGenerationOutputFinishReason;
+ logprobs: unknown;
+ index: number;
+ }>;
+ }
+ export declare class OvhCloudConversationalTask extends BaseConversationalTask {
+ constructor();
+ }
+ export declare class OvhCloudTextGenerationTask extends BaseTextGenerationTask {
+ constructor();
+ preparePayload(params: BodyParams<TextGenerationInput>): Record<string, unknown>;
+ getResponse(response: OvhCloudTextCompletionOutput): Promise<TextGenerationOutput>;
+ }
+ export {};
+ //# sourceMappingURL=ovhcloud.d.ts.map
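These declarations confirm OVHcloud registers two tasks: `conversational` and `text-generation`. A sketch of the text-generation path, which exercises the `preparePayload`/`getResponse` mapping declared above (placeholder model id):

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx");

const out = await client.textGeneration({
  provider: "ovhcloud",
  model: "meta-llama/Llama-3.1-8B-Instruct", // placeholder model id
  inputs: "Once upon a time",
  parameters: { max_new_tokens: 64 }, // sent to OVHcloud as max_tokens
});

console.log(out.generated_text);
```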
@@ -0,0 +1 @@
+ {"version":3,"file":"ovhcloud.d.ts","sourceRoot":"","sources":["../../../src/providers/ovhcloud.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AAEH,OAAO,EAAE,sBAAsB,EAAE,sBAAsB,EAAE,MAAM,kBAAkB,CAAC;AAClF,OAAO,KAAK,EAAE,oBAAoB,EAAE,oBAAoB,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAEvH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,UAAU,CAAC;AAE3C,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAI9D,UAAU,4BAA6B,SAAQ,IAAI,CAAC,oBAAoB,EAAE,SAAS,CAAC;IACnF,OAAO,EAAE,KAAK,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;QACb,aAAa,EAAE,gCAAgC,CAAC;QAChD,QAAQ,EAAE,OAAO,CAAC;QAClB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC,CAAC;CACH;AAED,qBAAa,0BAA2B,SAAQ,sBAAsB;;CAIrE;AAED,qBAAa,0BAA2B,SAAQ,sBAAsB;;IAK5D,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,mBAAmB,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAc1E,WAAW,CAAC,QAAQ,EAAE,4BAA4B,GAAG,OAAO,CAAC,oBAAoB,CAAC;CAcjG"}
@@ -1,6 +1,6 @@
 import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
- import type { InferenceProvider } from "../types";
 import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
+ import type { InferenceProvider } from "../types";
 export type InferenceSnippetOptions = {
 streaming?: boolean;
 billTo?: string;
@@ -1 +1 @@
- {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAI5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAE9E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAiTzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAG9E,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAkTzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -1 +1 @@
- {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAsEnE,CAAC"}
+ {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CA0EnE,CAAC"}
@@ -30,7 +30,7 @@ export interface Options {
 billTo?: string;
 }
 export type InferenceTask = Exclude<PipelineType, "other"> | "conversational";
- export declare const INFERENCE_PROVIDERS: readonly ["black-forest-labs", "cerebras", "cohere", "fal-ai", "featherless-ai", "fireworks-ai", "groq", "hf-inference", "hyperbolic", "nebius", "novita", "nscale", "openai", "replicate", "sambanova", "together"];
+ export declare const INFERENCE_PROVIDERS: readonly ["black-forest-labs", "cerebras", "cohere", "fal-ai", "featherless-ai", "fireworks-ai", "groq", "hf-inference", "hyperbolic", "nebius", "novita", "nscale", "openai", "ovhcloud", "replicate", "sambanova", "together"];
 export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
 export interface BaseArgs {
 /**
@@ -1 +1 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAC5E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mCAAmC,CAAC;AAEvF;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAEtC;;;;;OAKG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,GAAG,gBAAgB,CAAC;AAE9E,eAAO,MAAM,mBAAmB,sNAiBtB,CAAC;AAEX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,UAAU,GAAG,qBAAqB,GAAG,cAAc,CAAC;AAEtF,MAAM,WAAW,YAAY;IAC5B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,UAAU,CAAC;CACvB;AAED,MAAM,WAAW,SAAS;IACzB,UAAU,EAAE,UAAU,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB;AAED,MAAM,WAAW,UAAU,CAAC,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IACtF,IAAI,EAAE,CAAC,CAAC;IACR,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,CAAC,EAAE,6BAA6B,GAAG,SAAS,CAAC;IACpD,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB"}
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAC5E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mCAAmC,CAAC;AAEvF;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAEtC;;;;;OAKG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,GAAG,gBAAgB,CAAC;AAE9E,eAAO,MAAM,mBAAmB,kOAkBtB,CAAC;AAEX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,UAAU,GAAG,qBAAqB,GAAG,cAAc,CAAC;AAEtF,MAAM,WAAW,YAAY;IAC5B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,UAAU,CAAC;CACvB;AAED,MAAM,WAAW,SAAS;IACzB,UAAU,EAAE,UAAU,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB;AAED,MAAM,WAAW,UAAU,CAAC,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IACtF,IAAI,EAAE,CAAC,CAAC;IACR,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,CAAC,EAAE,6BAA6B,GAAG,SAAS,CAAC;IACpD,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@huggingface/inference",
3
- "version": "3.9.1",
3
+ "version": "3.10.0",
4
4
  "packageManager": "pnpm@8.10.5",
5
5
  "license": "MIT",
6
6
  "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -40,8 +40,8 @@
40
40
  },
41
41
  "type": "module",
42
42
  "dependencies": {
43
- "@huggingface/tasks": "^0.18.10",
44
- "@huggingface/jinja": "^0.3.4"
43
+ "@huggingface/tasks": "^0.19.0",
44
+ "@huggingface/jinja": "^0.4.0"
45
45
  },
46
46
  "devDependencies": {
47
47
  "@types/node": "18.13.0"
package/src/lib/getInferenceProviderMapping.ts CHANGED
@@ -8,7 +8,7 @@ import { typedInclude } from "../utils/typedInclude";
8
8
  export const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMapping>();
9
9
 
10
10
  export type InferenceProviderMapping = Partial<
11
- Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId" | "adapterWeightsPath">>
11
+ Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>
12
12
  >;
13
13
 
14
14
  export interface InferenceProviderModelMapping {
@@ -74,22 +74,6 @@ export async function getInferenceProviderMapping(
74
74
  `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
75
75
  );
76
76
  }
77
- if (providerMapping.adapter === "lora") {
78
- const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
79
- if (!treeResp.ok) {
80
- throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
81
- }
82
- const tree: Array<{ type: "file" | "directory"; path: string }> = await treeResp.json();
83
- const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
84
- if (!adapterWeightsPath) {
85
- throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
86
- }
87
- return {
88
- ...providerMapping,
89
- hfModelId: params.modelId,
90
- adapterWeightsPath,
91
- };
92
- }
93
77
  return { ...providerMapping, hfModelId: params.modelId };
94
78
  }
95
79
  return null;
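
With this hunk, the client no longer lists the model tree to locate a `.safetensors` file: `adapterWeightsPath` is kept in the `InferenceProviderMapping` record type and is expected to arrive with the hub-provided mapping itself. A minimal sketch of a resolved LoRA entry under that assumption (only `adapter`, `adapterWeightsPath`, and `hfModelId` are confirmed by this diff; the other field names are assumptions):

```ts
// Sketch of a resolved mapping entry after this change. hfModelId is still
// added client-side via `{ ...providerMapping, hfModelId: params.modelId }`;
// adapterWeightsPath now comes from the mapping instead of a /tree/main fetch.
const resolved = {
	hfModelId: "my-org/my-lora",               // set by getInferenceProviderMapping
	providerId: "fal-ai/some-base-model",      // hypothetical
	status: "live",                            // hypothetical
	task: "text-to-image",                     // hypothetical
	adapter: "lora",
	adapterWeightsPath: "adapter.safetensors", // hypothetical filename
};
```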
package/src/lib/getProviderHelper.ts CHANGED
@@ -11,6 +11,7 @@ import * as Nebius from "../providers/nebius";
11
11
  import * as Novita from "../providers/novita";
12
12
  import * as Nscale from "../providers/nscale";
13
13
  import * as OpenAI from "../providers/openai";
14
+ import * as OvhCloud from "../providers/ovhcloud";
14
15
  import type {
15
16
  AudioClassificationTaskHelper,
16
17
  AudioToAudioTaskHelper,
@@ -126,6 +127,10 @@ export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask,
126
127
  openai: {
127
128
  conversational: new OpenAI.OpenAIConversationalTask(),
128
129
  },
130
+ ovhcloud: {
131
+ conversational: new OvhCloud.OvhCloudConversationalTask(),
132
+ "text-generation": new OvhCloud.OvhCloudTextGenerationTask(),
133
+ },
129
134
  replicate: {
130
135
  "text-to-image": new Replicate.ReplicateTextToImageTask(),
131
136
  "text-to-speech": new Replicate.ReplicateTextToSpeechTask(),
package/src/providers/consts.ts CHANGED
@@ -32,6 +32,7 @@ export const HARDCODED_MODEL_INFERENCE_MAPPING: Record<
32
32
  novita: {},
33
33
  nscale: {},
34
34
  openai: {},
35
+ ovhcloud: {},
35
36
  replicate: {},
36
37
  sambanova: {},
37
38
  together: {},
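
The new `ovhcloud: {}` entry mirrors the other providers. As the provider file added below notes, this hardcoded mapping is where a model can be registered locally, for dev purposes, before it exists in the public mapping; a hedged sketch of what such an entry could look like (field names assumed from `InferenceProviderModelMapping`, values illustrative):

```ts
// Hypothetical dev-only mapping, shaped like one entry of
// HARDCODED_MODEL_INFERENCE_MAPPING (providerId/status/task are assumptions):
const devMapping = {
	ovhcloud: {
		"my-org/my-model": {
			providerId: "my-model-on-ovhcloud", // hypothetical OVHcloud-side ID
			hfModelId: "my-org/my-model",
			status: "live",
			task: "conversational",
		},
	},
};
```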
package/src/providers/fal-ai.ts CHANGED
@@ -86,16 +86,6 @@ export class FalAITextToImageTask extends FalAITask implements TextToImageTaskHe
86
86
  ...(params.args.parameters as Record<string, unknown>),
87
87
  sync_mode: true,
88
88
  prompt: params.args.inputs,
89
- ...(params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath
90
- ? {
91
- loras: [
92
- {
93
- path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
94
- scale: 1,
95
- },
96
- ],
97
- }
98
- : undefined),
99
89
  };
100
90
 
101
91
  if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
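
The inline `loras` spread is removed from the payload literal here; the retained `if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath)` branch, visible as context, shows the LoRA handling now happens after the base payload is built. Judging from the removed lines alone, the wire format stays the same; a sketch of the body sent for a LoRA model:

```ts
// Reconstructed from the removed lines; the concrete URL that buildLoraPath
// produces is not shown in this diff, so the path below is a placeholder.
const body = {
	sync_mode: true,
	prompt: "an astronaut riding a horse", // placeholder prompt
	loras: [
		{
			path: "<buildLoraPath(hfModelId, adapterWeightsPath)>",
			scale: 1,
		},
	],
};
```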
package/src/providers/ovhcloud.ts ADDED
@@ -0,0 +1,75 @@
1
+ /**
2
+ * See the registered mapping of HF model ID => OVHcloud model ID here:
3
+ *
4
+ * https://huggingface.co/api/partners/ovhcloud/models
5
+ *
6
+ * This is a publicly available mapping.
7
+ *
8
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
9
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
10
+ *
11
+ * - If you work at OVHcloud and want to update this mapping, please use the model mapping API we provide on huggingface.co
12
+ * - If you're a community member and want to add a new supported HF model to OVHcloud, please open an issue on the present repo
13
+ * and we will tag OVHcloud team members.
14
+ *
15
+ * Thanks!
16
+ */
17
+
18
+ import { BaseConversationalTask, BaseTextGenerationTask } from "./providerHelper";
19
+ import type { ChatCompletionOutput, TextGenerationOutput, TextGenerationOutputFinishReason } from "@huggingface/tasks";
20
+ import { InferenceOutputError } from "../lib/InferenceOutputError";
21
+ import type { BodyParams } from "../types";
22
+ import { omit } from "../utils/omit";
23
+ import type { TextGenerationInput } from "@huggingface/tasks";
24
+
25
+ const OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
26
+
27
+ interface OvhCloudTextCompletionOutput extends Omit<ChatCompletionOutput, "choices"> {
28
+ choices: Array<{
29
+ text: string;
30
+ finish_reason: TextGenerationOutputFinishReason;
31
+ logprobs: unknown;
32
+ index: number;
33
+ }>;
34
+ }
35
+
36
+ export class OvhCloudConversationalTask extends BaseConversationalTask {
37
+ constructor() {
38
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
39
+ }
40
+ }
41
+
42
+ export class OvhCloudTextGenerationTask extends BaseTextGenerationTask {
43
+ constructor() {
44
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
45
+ }
46
+
47
+ override preparePayload(params: BodyParams<TextGenerationInput>): Record<string, unknown> {
48
+ return {
49
+ model: params.model,
50
+ ...omit(params.args, ["inputs", "parameters"]),
51
+ ...(params.args.parameters
52
+ ? {
53
+ max_tokens: (params.args.parameters as Record<string, unknown>).max_new_tokens,
54
+ ...omit(params.args.parameters as Record<string, unknown>, "max_new_tokens"),
55
+ }
56
+ : undefined),
57
+ prompt: params.args.inputs,
58
+ };
59
+ }
60
+
61
+ override async getResponse(response: OvhCloudTextCompletionOutput): Promise<TextGenerationOutput> {
62
+ if (
63
+ typeof response === "object" &&
64
+ "choices" in response &&
65
+ Array.isArray(response?.choices) &&
66
+ typeof response?.model === "string"
67
+ ) {
68
+ const completion = response.choices[0];
69
+ return {
70
+ generated_text: completion.text,
71
+ };
72
+ }
73
+ throw new InferenceOutputError("Expected OVHcloud text generation response format");
74
+ }
75
+ }
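
With `"ovhcloud"` added to `INFERENCE_PROVIDERS`, registered in `PROVIDERS` for `conversational` and `text-generation`, and the provider helper above, requests can be routed to OVHcloud like any other provider. A minimal usage sketch (the model IDs are placeholders and assume the model is registered in the OVHcloud mapping):

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_***");

// Conversational → OvhCloudConversationalTask (OpenAI-compatible chat endpoint)
const chat = await client.chatCompletion({
	provider: "ovhcloud",
	model: "some-org/some-chat-model", // hypothetical: must be in the OVHcloud mapping
	messages: [{ role: "user", content: "Hello!" }],
});
console.log(chat.choices[0].message);

// Text generation → OvhCloudTextGenerationTask; note that preparePayload
// renames parameters.max_new_tokens to max_tokens for the completions API.
const out = await client.textGeneration({
	provider: "ovhcloud",
	model: "some-org/some-model", // hypothetical
	inputs: "Once upon a time",
	parameters: { max_new_tokens: 64 },
});
console.log(out.generated_text);
```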
package/src/snippets/getInferenceSnippets.ts CHANGED
@@ -8,11 +8,11 @@ import {
8
8
  } from "@huggingface/tasks";
9
9
  import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
10
10
  import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
11
+ import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
12
+ import { getProviderHelper } from "../lib/getProviderHelper";
11
13
  import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions";
12
14
  import type { InferenceProvider, InferenceTask, RequestArgs } from "../types";
13
15
  import { templates } from "./templates.exported";
14
- import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
15
- import { getProviderHelper } from "../lib/getProviderHelper";
16
16
 
17
17
  export type InferenceSnippetOptions = { streaming?: boolean; billTo?: string } & Record<string, unknown>;
18
18
 
@@ -112,6 +112,7 @@ const HF_JS_METHODS: Partial<Record<WidgetType, string>> = {
112
112
  "text-generation": "textGeneration",
113
113
  "text2text-generation": "textGeneration",
114
114
  "token-classification": "tokenClassification",
115
+ "text-to-speech": "textToSpeech",
115
116
  translation: "translation",
116
117
  };
117
118
 
@@ -310,7 +311,7 @@ const snippets: Partial<
310
311
  "text-generation": snippetGenerator("basic"),
311
312
  "text-to-audio": snippetGenerator("textToAudio"),
312
313
  "text-to-image": snippetGenerator("textToImage"),
313
- "text-to-speech": snippetGenerator("textToAudio"),
314
+ "text-to-speech": snippetGenerator("textToSpeech"),
314
315
  "text-to-video": snippetGenerator("textToVideo"),
315
316
  "text2text-generation": snippetGenerator("basic"),
316
317
  "token-classification": snippetGenerator("basic"),
package/src/snippets/templates.exported.ts CHANGED
@@ -7,6 +7,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
7
7
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
8
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
9
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
10
+ "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
10
11
  "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %} },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
11
12
  },
12
13
  "huggingface.js": {
@@ -16,7 +17,8 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
16
17
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
17
18
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
18
19
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
19
- "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
20
+ "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
21
+ "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
20
22
  },
21
23
  "openai": {
22
24
  "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n{% if billTo %}\n\tdefaultHeaders: {\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \n\t}\n{% endif %}\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
@@ -25,7 +27,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
25
27
  },
26
28
  "python": {
27
29
  "fal_client": {
28
- "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
30
+ "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
29
31
  },
30
32
  "huggingface_hub": {
31
33
  "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
@@ -37,6 +39,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
37
39
  "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
38
40
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
39
41
  "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
42
+ "textToSpeech": "# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) \n",
40
43
  "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
41
44
  },
42
45
  "openai": {
@@ -53,8 +56,9 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
53
56
  "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
54
57
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
55
58
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
56
- "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
59
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
57
60
  "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
61
+ "textToSpeech": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
58
62
  "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
59
63
  "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
60
64
  }
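
The net effect of switching `text-to-speech` from the shared `textToAudio` generator to the dedicated `textToSpeech` templates: huggingface.js snippets now call `client.textToSpeech(...)`, and the raw `fetch`/`requests` variants send `{ text: ... }` instead of `{ inputs: ... }` for transformers models. Rendering the huggingface.js template above with placeholder values gives roughly:

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_***");

const audio = await client.textToSpeech({
	provider: "hf-inference",         // placeholder provider
	model: "some-org/some-tts-model", // placeholder model ID
	inputs: "Hello world",
});
// Use the generated audio (it's a Blob)
```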
package/src/types.ts CHANGED
@@ -51,6 +51,7 @@ export const INFERENCE_PROVIDERS = [
51
51
  "novita",
52
52
  "nscale",
53
53
  "openai",
54
+ "ovhcloud",
54
55
  "replicate",
55
56
  "sambanova",
56
57
  "together",