@huggingface/inference 3.9.2 → 3.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,7 +1,7 @@
  # 🤗 Hugging Face Inference
 
- A Typescript powered wrapper for the HF Inference API (serverless), Inference Endpoints (dedicated), and all supported Inference Providers.
- It works with [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index), and even with all supported third-party Inference Providers.
+ A TypeScript-powered wrapper for Inference Providers (serverless) and Inference Endpoints (dedicated).
+ It works with [Inference Providers (serverless)](https://huggingface.co/docs/api-inference/index) – including all supported third-party Inference Providers – and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index).
 
  Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).
 
@@ -25,20 +25,20 @@ yarn add @huggingface/inference
 
  ```ts
  // esm.sh
- import { InferenceClient } from "https://esm.sh/@huggingface/inference"
+ import { InferenceClient } from "https://esm.sh/@huggingface/inference";
  // or npm:
- import { InferenceClient } from "npm:@huggingface/inference"
+ import { InferenceClient } from "npm:@huggingface/inference";
  ```
 
  ### Initialize
 
  ```typescript
- import { InferenceClient } from '@huggingface/inference'
+ import { InferenceClient } from '@huggingface/inference';
 
- const hf = new InferenceClient('your access token')
+ const hf = new InferenceClient('your access token');
  ```
 
- ❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
+ ❗**Important note:** Always pass an access token. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
 
  Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
 
@@ -54,6 +54,7 @@ Currently, we support the following providers:
  - [Nebius](https://studio.nebius.ai)
  - [Novita](https://novita.ai/?utm_source=github_huggingface&utm_medium=github_readme&utm_campaign=link)
  - [Nscale](https://nscale.com)
+ - [OVHcloud](https://endpoints.ai.cloud.ovh.net/)
  - [Replicate](https://replicate.com)
  - [Sambanova](https://sambanova.ai)
  - [Together](https://together.xyz)
@@ -84,6 +85,7 @@ Only a subset of models are supported when requesting third-party providers. You
  - [Hyperbolic supported models](https://huggingface.co/api/partners/hyperbolic/models)
  - [Nebius supported models](https://huggingface.co/api/partners/nebius/models)
  - [Nscale supported models](https://huggingface.co/api/partners/nscale/models)
+ - [OVHcloud supported models](https://huggingface.co/api/partners/ovhcloud/models)
  - [Replicate supported models](https://huggingface.co/api/partners/replicate/models)
  - [Sambanova supported models](https://huggingface.co/api/partners/sambanova/models)
  - [Together supported models](https://huggingface.co/api/partners/together/models)
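
The headline change in this release is the new OVHcloud provider. As a quick orientation (not part of the published README), here is a minimal sketch of selecting it through the client; the model ID is illustrative, so check the OVHcloud mapping linked above for models that are actually registered:

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_...your token...");

// "meta-llama/Llama-3.1-8B-Instruct" is an illustrative model ID; see
// https://huggingface.co/api/partners/ovhcloud/models for real entries.
const out = await client.chatCompletion({
  provider: "ovhcloud",
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(out.choices[0].message.content);
```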
package/dist/index.cjs CHANGED
@@ -1025,6 +1025,39 @@ var OpenAIConversationalTask = class extends BaseConversationalTask {
  }
  };
 
+ // src/providers/ovhcloud.ts
+ var OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
+ var OvhCloudConversationalTask = class extends BaseConversationalTask {
+   constructor() {
+     super("ovhcloud", OVHCLOUD_API_BASE_URL);
+   }
+ };
+ var OvhCloudTextGenerationTask = class extends BaseTextGenerationTask {
+   constructor() {
+     super("ovhcloud", OVHCLOUD_API_BASE_URL);
+   }
+   preparePayload(params) {
+     return {
+       model: params.model,
+       ...omit(params.args, ["inputs", "parameters"]),
+       ...params.args.parameters ? {
+         max_tokens: params.args.parameters.max_new_tokens,
+         ...omit(params.args.parameters, "max_new_tokens")
+       } : void 0,
+       prompt: params.args.inputs
+     };
+   }
+   async getResponse(response) {
+     if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
+       const completion = response.choices[0];
+       return {
+         generated_text: completion.text
+       };
+     }
+     throw new InferenceOutputError("Expected OVHcloud text generation response format");
+   }
+ };
+
  // src/providers/replicate.ts
  var ReplicateTask = class extends TaskProviderHelper {
    constructor(url) {
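
Worth calling out in the hunk above: OVHcloud exposes an OpenAI-compatible completions API, so `preparePayload` renames the HF-style `max_new_tokens` parameter to `max_tokens` and moves `inputs` into `prompt`. A standalone sketch of that transformation (the helper name and types below are illustrative, not exports of this package):

```ts
// Illustrative sketch mirroring OvhCloudTextGenerationTask.preparePayload
// from the hunk above; this helper is hypothetical.
type TextGenArgs = {
  inputs: string;
  parameters?: { max_new_tokens?: number; [key: string]: unknown };
};

function toOvhCloudPayload(model: string, args: TextGenArgs): Record<string, unknown> {
  const { inputs, parameters, ...rest } = args;
  const { max_new_tokens, ...otherParams } = parameters ?? {};
  return {
    model,
    ...rest,
    // HF-style `max_new_tokens` becomes the OpenAI-compatible `max_tokens`.
    ...(parameters ? { max_tokens: max_new_tokens, ...otherParams } : undefined),
    // The HF `inputs` field is sent as the completion `prompt`.
    prompt: inputs,
  };
}

// => { model: "m", max_tokens: 64, temperature: 0.5, prompt: "Hi" }
console.log(toOvhCloudPayload("m", { inputs: "Hi", parameters: { max_new_tokens: 64, temperature: 0.5 } }));
```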
@@ -1277,6 +1310,10 @@ var PROVIDERS = {
  openai: {
    conversational: new OpenAIConversationalTask()
  },
+ ovhcloud: {
+   conversational: new OvhCloudConversationalTask(),
+   "text-generation": new OvhCloudTextGenerationTask()
+ },
  replicate: {
    "text-to-image": new ReplicateTextToImageTask(),
    "text-to-speech": new ReplicateTextToSpeechTask(),
@@ -1315,7 +1352,7 @@ function getProviderHelper(provider, task) {
 
  // package.json
  var name = "@huggingface/inference";
- var version = "3.9.2";
+ var version = "3.10.0";
 
  // src/providers/consts.ts
  var HARDCODED_MODEL_INFERENCE_MAPPING = {
@@ -1338,6 +1375,7 @@ var HARDCODED_MODEL_INFERENCE_MAPPING = {
  novita: {},
  nscale: {},
  openai: {},
+ ovhcloud: {},
  replicate: {},
  sambanova: {},
  together: {}
@@ -2248,6 +2286,7 @@ var INFERENCE_PROVIDERS = [
  "novita",
  "nscale",
  "openai",
+ "ovhcloud",
  "replicate",
  "sambanova",
  "together"
@@ -2272,6 +2311,7 @@ var templates = {
  "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
  "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
  },
  "huggingface.js": {
@@ -2293,11 +2333,23 @@ const image = await client.textToImage({
    billTo: "{{ billTo }}",
  }{% endif %});
  /// Use the generated image (it's a Blob)`,
+ "textToSpeech": `import { InferenceClient } from "@huggingface/inference";
+
+ const client = new InferenceClient("{{ accessToken }}");
+
+ const audio = await client.textToSpeech({
+   provider: "{{ provider }}",
+   model: "{{ model.id }}",
+   inputs: {{ inputs.asObj.inputs }},
+ }{% if billTo %}, {
+   billTo: "{{ billTo }}",
+ }{% endif %});
+ // Use the generated audio (it's a Blob)`,
  "textToVideo": `import { InferenceClient } from "@huggingface/inference";
 
  const client = new InferenceClient("{{ accessToken }}");
 
- const image = await client.textToVideo({
+ const video = await client.textToVideo({
    provider: "{{ provider }}",
    model: "{{ model.id }}",
    inputs: {{ inputs.asObj.inputs }},
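
A dedicated `textToSpeech` template now ships for huggingface.js snippets, alongside the `image` → `video` variable-name fix in the `textToVideo` one. Rendered, the new template produces client code roughly like this sketch (token and model ID are placeholders):

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_...your token...");

// Placeholder model ID; any text-to-speech model served by a supported
// provider fits here.
const audio = await client.textToSpeech({
  provider: "hf-inference",
  model: "some-org/some-tts-model",
  inputs: "Hello from huggingface.js!",
});
// `audio` is a Blob; in Node you could persist it with
// fs.promises.writeFile("out.wav", Buffer.from(await audio.arrayBuffer())).
```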
@@ -2313,7 +2365,7 @@ const image = await client.textToVideo({
  },
  "python": {
  "fal_client": {
- "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
  },
  "huggingface_hub": {
  "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
@@ -2325,6 +2377,7 @@ const image = await client.textToVideo({
  "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
  "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
  "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
+ "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
  "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
  },
  "openai": {
@@ -2341,8 +2394,9 @@ const image = await client.textToVideo({
  "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
  "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {\n "Authorization": "{{ authorizationHeader }}",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}"\n{% endif %}\n}',
  "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
- "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
  "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
  }
@@ -2445,6 +2499,7 @@ var HF_JS_METHODS = {
  "text-generation": "textGeneration",
  "text2text-generation": "textGeneration",
  "token-classification": "tokenClassification",
+ "text-to-speech": "textToSpeech",
  translation: "translation"
  };
  var snippetGenerator = (templateName, inputPreparationFn) => {
@@ -2591,7 +2646,7 @@ var snippets = {
  "text-generation": snippetGenerator("basic"),
  "text-to-audio": snippetGenerator("textToAudio"),
  "text-to-image": snippetGenerator("textToImage"),
- "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-speech": snippetGenerator("textToSpeech"),
  "text-to-video": snippetGenerator("textToVideo"),
  "text2text-generation": snippetGenerator("basic"),
  "token-classification": snippetGenerator("basic"),
package/dist/index.js CHANGED
@@ -968,6 +968,39 @@ var OpenAIConversationalTask = class extends BaseConversationalTask {
  }
  };
 
+ // src/providers/ovhcloud.ts
+ var OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
+ var OvhCloudConversationalTask = class extends BaseConversationalTask {
+   constructor() {
+     super("ovhcloud", OVHCLOUD_API_BASE_URL);
+   }
+ };
+ var OvhCloudTextGenerationTask = class extends BaseTextGenerationTask {
+   constructor() {
+     super("ovhcloud", OVHCLOUD_API_BASE_URL);
+   }
+   preparePayload(params) {
+     return {
+       model: params.model,
+       ...omit(params.args, ["inputs", "parameters"]),
+       ...params.args.parameters ? {
+         max_tokens: params.args.parameters.max_new_tokens,
+         ...omit(params.args.parameters, "max_new_tokens")
+       } : void 0,
+       prompt: params.args.inputs
+     };
+   }
+   async getResponse(response) {
+     if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
+       const completion = response.choices[0];
+       return {
+         generated_text: completion.text
+       };
+     }
+     throw new InferenceOutputError("Expected OVHcloud text generation response format");
+   }
+ };
+
  // src/providers/replicate.ts
  var ReplicateTask = class extends TaskProviderHelper {
    constructor(url) {
@@ -1220,6 +1253,10 @@ var PROVIDERS = {
  openai: {
    conversational: new OpenAIConversationalTask()
  },
+ ovhcloud: {
+   conversational: new OvhCloudConversationalTask(),
+   "text-generation": new OvhCloudTextGenerationTask()
+ },
  replicate: {
    "text-to-image": new ReplicateTextToImageTask(),
    "text-to-speech": new ReplicateTextToSpeechTask(),
@@ -1258,7 +1295,7 @@ function getProviderHelper(provider, task) {
 
  // package.json
  var name = "@huggingface/inference";
- var version = "3.9.2";
+ var version = "3.10.0";
 
  // src/providers/consts.ts
  var HARDCODED_MODEL_INFERENCE_MAPPING = {
@@ -1281,6 +1318,7 @@ var HARDCODED_MODEL_INFERENCE_MAPPING = {
  novita: {},
  nscale: {},
  openai: {},
+ ovhcloud: {},
  replicate: {},
  sambanova: {},
  together: {}
@@ -2191,6 +2229,7 @@ var INFERENCE_PROVIDERS = [
  "novita",
  "nscale",
  "openai",
+ "ovhcloud",
  "replicate",
  "sambanova",
  "together"
@@ -2218,6 +2257,7 @@ var templates = {
  "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
  "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
  "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
  },
  "huggingface.js": {
@@ -2239,11 +2279,23 @@ const image = await client.textToImage({
    billTo: "{{ billTo }}",
  }{% endif %});
  /// Use the generated image (it's a Blob)`,
+ "textToSpeech": `import { InferenceClient } from "@huggingface/inference";
+
+ const client = new InferenceClient("{{ accessToken }}");
+
+ const audio = await client.textToSpeech({
+   provider: "{{ provider }}",
+   model: "{{ model.id }}",
+   inputs: {{ inputs.asObj.inputs }},
+ }{% if billTo %}, {
+   billTo: "{{ billTo }}",
+ }{% endif %});
+ // Use the generated audio (it's a Blob)`,
  "textToVideo": `import { InferenceClient } from "@huggingface/inference";
 
  const client = new InferenceClient("{{ accessToken }}");
 
- const image = await client.textToVideo({
+ const video = await client.textToVideo({
    provider: "{{ provider }}",
    model: "{{ model.id }}",
    inputs: {{ inputs.asObj.inputs }},
@@ -2259,7 +2311,7 @@ const image = await client.textToVideo({
  },
  "python": {
  "fal_client": {
- "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
  },
  "huggingface_hub": {
  "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
@@ -2271,6 +2323,7 @@ const image = await client.textToVideo({
  "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
  "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
  "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
+ "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
  "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
  },
  "openai": {
@@ -2287,8 +2340,9 @@ const image = await client.textToVideo({
  "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
  "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {\n "Authorization": "{{ authorizationHeader }}",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}"\n{% endif %}\n}',
  "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
- "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
+ "textToSpeech": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
  "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
  "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
  }
@@ -2391,6 +2445,7 @@ var HF_JS_METHODS = {
  "text-generation": "textGeneration",
  "text2text-generation": "textGeneration",
  "token-classification": "tokenClassification",
+ "text-to-speech": "textToSpeech",
  translation: "translation"
  };
  var snippetGenerator = (templateName, inputPreparationFn) => {
@@ -2537,7 +2592,7 @@ var snippets = {
  "text-generation": snippetGenerator("basic"),
  "text-to-audio": snippetGenerator("textToAudio"),
  "text-to-image": snippetGenerator("textToImage"),
- "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-speech": snippetGenerator("textToSpeech"),
  "text-to-video": snippetGenerator("textToVideo"),
  "text2text-generation": snippetGenerator("basic"),
  "token-classification": snippetGenerator("basic"),
@@ -1,6 +1,6 @@
  import type { WidgetType } from "@huggingface/tasks";
  import type { InferenceProvider, ModelId } from "../types";
- export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId">>>>;
+ export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "ovhcloud" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId">>>>;
  export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>>;
  export interface InferenceProviderModelMapping {
  adapter?: string;
@@ -1 +1 @@
- {"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,0SAA+C,CAAC;AAE1F,MAAM,MAAM,wBAAwB,GAAG,OAAO,CAC7C,MAAM,CAAC,iBAAiB,EAAE,IAAI,CAAC,6BAA6B,EAAE,WAAW,CAAC,CAAC,CAC3E,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,SAAS,EAAE,OAAO,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,GAAG,SAAS,CAAC;IAC3B,IAAI,EAAE,UAAU,CAAC;CACjB;AAED,wBAAsB,2BAA2B,CAChD,MAAM,EAAE;IACP,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,iBAAiB,CAAC;IAC5B,IAAI,EAAE,UAAU,CAAC;CACjB,EACD,OAAO,EAAE;IACR,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,WAAW,EAAE,IAAI,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;CACtE,GACC,OAAO,CAAC,6BAA6B,GAAG,IAAI,CAAC,CA+C/C"}
+ {"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,uTAA+C,CAAC;AAE1F,MAAM,MAAM,wBAAwB,GAAG,OAAO,CAC7C,MAAM,CAAC,iBAAiB,EAAE,IAAI,CAAC,6BAA6B,EAAE,WAAW,CAAC,CAAC,CAC3E,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,SAAS,EAAE,OAAO,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,GAAG,SAAS,CAAC;IAC3B,IAAI,EAAE,UAAU,CAAC;CACjB;AAED,wBAAsB,2BAA2B,CAChD,MAAM,EAAE;IACP,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,iBAAiB,CAAC;IAC5B,IAAI,EAAE,UAAU,CAAC;CACjB,EACD,OAAO,EAAE;IACR,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,WAAW,EAAE,IAAI,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;CACtE,GACC,OAAO,CAAC,6BAA6B,GAAG,IAAI,CAAC,CA+C/C"}
@@ -1 +1 @@
- {"version":3,"file":"getProviderHelper.d.ts","sourceRoot":"","sources":["../../../src/lib/getProviderHelper.ts"],"names":[],"mappings":"AAaA,OAAO,KAAK,EACX,6BAA6B,EAC7B,sBAAsB,EACtB,oCAAoC,EACpC,wBAAwB,EACxB,mCAAmC,EACnC,2BAA2B,EAC3B,kBAAkB,EAClB,6BAA6B,EAC7B,2BAA2B,EAC3B,sBAAsB,EACtB,qBAAqB,EACrB,yBAAyB,EACzB,2BAA2B,EAC3B,4BAA4B,EAC5B,uBAAuB,EACvB,gCAAgC,EAChC,+BAA+B,EAC/B,2BAA2B,EAC3B,kBAAkB,EAClB,4BAA4B,EAC5B,wBAAwB,EACxB,qBAAqB,EACrB,qBAAqB,EACrB,sBAAsB,EACtB,qBAAqB,EACrB,6BAA6B,EAC7B,qBAAqB,EACrB,iCAAiC,EACjC,gCAAgC,EAChC,qCAAqC,EACrC,MAAM,6BAA6B,CAAC;AAIrC,OAAO,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAEjE,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,iBAAiB,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,kBAAkB,CAAC,CAAC,CA4FnG,CAAC;AAEF;;GAEG;AACH,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,iBAAiB,GACrB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,8BAA8B,GAClC,oCAAoC,GAAG,kBAAkB,CAAC;AAC7D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,WAAW,GACf,kBAAkB,GAAG,kBAAkB,CAAC;AAC3C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,6BAA6B,GACjC,mCAAmC,GAAG,kBAAkB,CAAC;AAC5D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,kBAAkB,GACtB,yBAAyB,GAAG,kBAAkB,CAAC;AAClD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gCAAgC,GACpC,qCAAqC,GAAG,kBAAkB,CAAC;AAC9D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,wBAAwB,GAC5B,+BAA+B,GAAG,kBAAkB,CAAC;AACxD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,aAAa,GACjB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,uBAAuB,GAAG,kBAAkB,CAAC;AAChD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,2BAA2B,GAC/B,iCAAiC,GAAG,kBAAkB,CAAC;AAC1D,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,iBAAiB,EAAE
,IAAI,EAAE,aAAa,GAAG,SAAS,GAAG,kBAAkB,CAAC"}
+ {"version":3,"file":"getProviderHelper.d.ts","sourceRoot":"","sources":["../../../src/lib/getProviderHelper.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EACX,6BAA6B,EAC7B,sBAAsB,EACtB,oCAAoC,EACpC,wBAAwB,EACxB,mCAAmC,EACnC,2BAA2B,EAC3B,kBAAkB,EAClB,6BAA6B,EAC7B,2BAA2B,EAC3B,sBAAsB,EACtB,qBAAqB,EACrB,yBAAyB,EACzB,2BAA2B,EAC3B,4BAA4B,EAC5B,uBAAuB,EACvB,gCAAgC,EAChC,+BAA+B,EAC/B,2BAA2B,EAC3B,kBAAkB,EAClB,4BAA4B,EAC5B,wBAAwB,EACxB,qBAAqB,EACrB,qBAAqB,EACrB,sBAAsB,EACtB,qBAAqB,EACrB,6BAA6B,EAC7B,qBAAqB,EACrB,iCAAiC,EACjC,gCAAgC,EAChC,qCAAqC,EACrC,MAAM,6BAA6B,CAAC;AAIrC,OAAO,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAEjE,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,iBAAiB,EAAE,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,kBAAkB,CAAC,CAAC,CAgGnG,CAAC;AAEF;;GAEG;AACH,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,iBAAiB,GACrB,wBAAwB,GAAG,kBAAkB,CAAC;AACjD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,8BAA8B,GAClC,oCAAoC,GAAG,kBAAkB,CAAC;AAC7D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,WAAW,GACf,kBAAkB,GAAG,kBAAkB,CAAC;AAC3C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,6BAA6B,GACjC,mCAAmC,GAAG,kBAAkB,CAAC;AAC5D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,kBAAkB,GACtB,yBAAyB,GAAG,kBAAkB,CAAC;AAClD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gCAAgC,GACpC,qCAAqC,GAAG,kBAAkB,CAAC;AAC9D,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,gBAAgB,GACpB,sBAAsB,GAAG,kBAAkB,CAAC;AAC/C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,qBAAqB,GACzB,4BAA4B,GAAG,kBAAkB,CAAC;AACrD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,0BAA0B,GAC9B,gCAAgC,GAAG,kBAAkB,CAAC;AACzD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,wBAAwB,GAC5B,+BAA+B,GAAG,kBAAkB,CAAC;AACxD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,oBAAoB,GACxB,2BAA2B,GAAG,kBAAkB,CAAC;AACpD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,sBAAsB,GAC1B,6BAA6B,GAAG,kBAAkB,CAAC;AACtD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,aAAa,GACjB,qBAAqB,GAAG,kBAAkB,CAAC;AAC9C,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,eAAe,GACnB,uBAAuB,GAAG,kBAAkB,CAAC;AAChD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,EAAE,2BAA2B,GAC/B,iCAAiC,GAAG,kBAAkB,CAAC;AAC1D,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,iBAAiB,EAAE
,IAAI,EAAE,aAAa,GAAG,SAAS,GAAG,kBAAkB,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AACxF,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC;;;;;;GAMG;AACH,eAAO,MAAM,iCAAiC,EAAE,MAAM,CACrD,iBAAiB,EACjB,MAAM,CAAC,OAAO,EAAE,6BAA6B,CAAC,CAwB9C,CAAC"}
+ {"version":3,"file":"consts.d.ts","sourceRoot":"","sources":["../../../src/providers/consts.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AACxF,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAClD,OAAO,EAAE,KAAK,OAAO,EAAE,MAAM,UAAU,CAAC;AAExC;;;;;;GAMG;AACH,eAAO,MAAM,iCAAiC,EAAE,MAAM,CACrD,iBAAiB,EACjB,MAAM,CAAC,OAAO,EAAE,6BAA6B,CAAC,CAyB9C,CAAC"}
@@ -0,0 +1,38 @@
+ /**
+  * See the registered mapping of HF model ID => OVHcloud model ID here:
+  *
+  * https://huggingface.co/api/partners/ovhcloud/models
+  *
+  * This is a publicly available mapping.
+  *
+  * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
+  * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
+  *
+  * - If you work at OVHcloud and want to update this mapping, please use the model mapping API we provide on huggingface.co
+  * - If you're a community member and want to add a new supported HF model to OVHcloud, please open an issue on the present repo
+  *   and we will tag OVHcloud team members.
+  *
+  * Thanks!
+  */
+ import { BaseConversationalTask, BaseTextGenerationTask } from "./providerHelper";
+ import type { ChatCompletionOutput, TextGenerationOutput, TextGenerationOutputFinishReason } from "@huggingface/tasks";
+ import type { BodyParams } from "../types";
+ import type { TextGenerationInput } from "@huggingface/tasks";
+ interface OvhCloudTextCompletionOutput extends Omit<ChatCompletionOutput, "choices"> {
+   choices: Array<{
+     text: string;
+     finish_reason: TextGenerationOutputFinishReason;
+     logprobs: unknown;
+     index: number;
+   }>;
+ }
+ export declare class OvhCloudConversationalTask extends BaseConversationalTask {
+   constructor();
+ }
+ export declare class OvhCloudTextGenerationTask extends BaseTextGenerationTask {
+   constructor();
+   preparePayload(params: BodyParams<TextGenerationInput>): Record<string, unknown>;
+   getResponse(response: OvhCloudTextCompletionOutput): Promise<TextGenerationOutput>;
+ }
+ export {};
+ //# sourceMappingURL=ovhcloud.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ovhcloud.d.ts","sourceRoot":"","sources":["../../../src/providers/ovhcloud.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AAEH,OAAO,EAAE,sBAAsB,EAAE,sBAAsB,EAAE,MAAM,kBAAkB,CAAC;AAClF,OAAO,KAAK,EAAE,oBAAoB,EAAE,oBAAoB,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAEvH,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,UAAU,CAAC;AAE3C,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAI9D,UAAU,4BAA6B,SAAQ,IAAI,CAAC,oBAAoB,EAAE,SAAS,CAAC;IACnF,OAAO,EAAE,KAAK,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;QACb,aAAa,EAAE,gCAAgC,CAAC;QAChD,QAAQ,EAAE,OAAO,CAAC;QAClB,KAAK,EAAE,MAAM,CAAC;KACd,CAAC,CAAC;CACH;AAED,qBAAa,0BAA2B,SAAQ,sBAAsB;;CAIrE;AAED,qBAAa,0BAA2B,SAAQ,sBAAsB;;IAK5D,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,mBAAmB,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAc1E,WAAW,CAAC,QAAQ,EAAE,4BAA4B,GAAG,OAAO,CAAC,oBAAoB,CAAC;CAcjG"}
@@ -1,6 +1,6 @@
  import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
- import type { InferenceProvider } from "../types";
  import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
+ import type { InferenceProvider } from "../types";
  export type InferenceSnippetOptions = {
    streaming?: boolean;
    billTo?: string;
@@ -1 +1 @@
- {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAI5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAE9E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAiTzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAG9E,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAkTzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -1 +1 @@
- {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAsEnE,CAAC"}
+ {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CA0EnE,CAAC"}
@@ -30,7 +30,7 @@ export interface Options {
  billTo?: string;
  }
  export type InferenceTask = Exclude<PipelineType, "other"> | "conversational";
- export declare const INFERENCE_PROVIDERS: readonly ["black-forest-labs", "cerebras", "cohere", "fal-ai", "featherless-ai", "fireworks-ai", "groq", "hf-inference", "hyperbolic", "nebius", "novita", "nscale", "openai", "replicate", "sambanova", "together"];
+ export declare const INFERENCE_PROVIDERS: readonly ["black-forest-labs", "cerebras", "cohere", "fal-ai", "featherless-ai", "fireworks-ai", "groq", "hf-inference", "hyperbolic", "nebius", "novita", "nscale", "openai", "ovhcloud", "replicate", "sambanova", "together"];
  export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
  export interface BaseArgs {
  /**
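
Because `InferenceProvider` is derived from this tuple, adding `"ovhcloud"` here is what lets the new literal flow through every API that accepts a `provider` option. A quick sketch of how the derived union behaves (tuple abridged for brevity):

```ts
// Sketch of the tuple-to-union derivation, with the tuple abridged.
const INFERENCE_PROVIDERS = ["openai", "ovhcloud", "replicate"] as const;
type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];

const ok: InferenceProvider = "ovhcloud"; // compiles as of 3.10.0
// const bad: InferenceProvider = "ovhclod"; // type error: not in the tuple
```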
@@ -1 +1 @@
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAC5E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mCAAmC,CAAC;AAEvF;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAEtC;;;;;OAKG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,GAAG,gBAAgB,CAAC;AAE9E,eAAO,MAAM,mBAAmB,sNAiBtB,CAAC;AAEX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,UAAU,GAAG,qBAAqB,GAAG,cAAc,CAAC;AAEtF,MAAM,WAAW,YAAY;IAC5B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,UAAU,CAAC;CACvB;AAED,MAAM,WAAW,SAAS;IACzB,UAAU,EAAE,UAAU,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB;AAED,MAAM,WAAW,UAAU,CAAC,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IACtF,IAAI,EAAE,CAAC,CAAC;IACR,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,CAAC,EAAE,6BAA6B,GAAG,SAAS,CAAC;IACpD,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB"}
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAC5E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mCAAmC,CAAC;AAEvF;;GAEG;AACH,MAAM,MAAM,OAAO,GAAG,MAAM,CAAC;AAE7B,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IAEzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAEtC;;;;;OAKG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,GAAG,gBAAgB,CAAC;AAE9E,eAAO,MAAM,mBAAmB,kOAkBtB,CAAC;AAEX,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,MAAM,WAAW,QAAQ;IACxB;;;;;;OAMG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;;OAIG;IACH,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CACG;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAC5B;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GACnB;IAAE,MAAM,EAAE,MAAM,CAAA;CAAE,GAClB;IAAE,IAAI,EAAE,MAAM,CAAA;CAAE,GAChB;IAAE,SAAS,EAAE,MAAM,CAAA;CAAE,GACrB,mBAAmB,CACrB,GAAG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG,UAAU,GAAG,qBAAqB,GAAG,cAAc,CAAC;AAEtF,MAAM,WAAW,YAAY;IAC5B,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,UAAU,CAAC;CACvB;AAED,MAAM,WAAW,SAAS;IACzB,UAAU,EAAE,UAAU,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB;AAED,MAAM,WAAW,UAAU,CAAC,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IACtF,IAAI,EAAE,CAAC,CAAC;IACR,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,CAAC,EAAE,6BAA6B,GAAG,SAAS,CAAC;IACpD,IAAI,CAAC,EAAE,aAAa,CAAC;CACrB"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@huggingface/inference",
- "version": "3.9.2",
+ "version": "3.10.0",
  "packageManager": "pnpm@8.10.5",
  "license": "MIT",
  "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -40,8 +40,8 @@
  },
  "type": "module",
  "dependencies": {
- "@huggingface/tasks": "^0.18.11",
- "@huggingface/jinja": "^0.3.4"
+ "@huggingface/tasks": "^0.19.0",
+ "@huggingface/jinja": "^0.4.0"
  },
  "devDependencies": {
  "@types/node": "18.13.0"
@@ -11,6 +11,7 @@ import * as Nebius from "../providers/nebius";
  import * as Novita from "../providers/novita";
  import * as Nscale from "../providers/nscale";
  import * as OpenAI from "../providers/openai";
+ import * as OvhCloud from "../providers/ovhcloud";
  import type {
  AudioClassificationTaskHelper,
  AudioToAudioTaskHelper,
@@ -126,6 +127,10 @@ export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask,
  openai: {
    conversational: new OpenAI.OpenAIConversationalTask(),
  },
+ ovhcloud: {
+   conversational: new OvhCloud.OvhCloudConversationalTask(),
+   "text-generation": new OvhCloud.OvhCloudTextGenerationTask(),
+ },
  replicate: {
    "text-to-image": new Replicate.ReplicateTextToImageTask(),
    "text-to-speech": new Replicate.ReplicateTextToSpeechTask(),
@@ -32,6 +32,7 @@ export const HARDCODED_MODEL_INFERENCE_MAPPING: Record<
  novita: {},
  nscale: {},
  openai: {},
+ ovhcloud: {},
  replicate: {},
  sambanova: {},
  together: {},
@@ -0,0 +1,75 @@
1
+ /**
2
+ * See the registered mapping of HF model ID => OVHcloud model ID here:
3
+ *
4
+ * https://huggingface.co/api/partners/ovhcloud/models
5
+ *
6
+ * This is a publicly available mapping.
7
+ *
8
+ * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
9
+ * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
10
+ *
11
+ * - If you work at OVHcloud and want to update this mapping, please use the model mapping API we provide on huggingface.co
12
+ * - If you're a community member and want to add a new supported HF model to OVHcloud, please open an issue on the present repo
13
+ * and we will tag OVHcloud team members.
14
+ *
15
+ * Thanks!
16
+ */
17
+
18
+ import { BaseConversationalTask, BaseTextGenerationTask } from "./providerHelper";
19
+ import type { ChatCompletionOutput, TextGenerationOutput, TextGenerationOutputFinishReason } from "@huggingface/tasks";
20
+ import { InferenceOutputError } from "../lib/InferenceOutputError";
21
+ import type { BodyParams } from "../types";
22
+ import { omit } from "../utils/omit";
23
+ import type { TextGenerationInput } from "@huggingface/tasks";
24
+
25
+ const OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
26
+
27
+ interface OvhCloudTextCompletionOutput extends Omit<ChatCompletionOutput, "choices"> {
28
+ choices: Array<{
29
+ text: string;
30
+ finish_reason: TextGenerationOutputFinishReason;
31
+ logprobs: unknown;
32
+ index: number;
33
+ }>;
34
+ }
35
+
36
+ export class OvhCloudConversationalTask extends BaseConversationalTask {
37
+ constructor() {
38
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
39
+ }
40
+ }
41
+
42
+ export class OvhCloudTextGenerationTask extends BaseTextGenerationTask {
43
+ constructor() {
44
+ super("ovhcloud", OVHCLOUD_API_BASE_URL);
45
+ }
46
+
47
+ override preparePayload(params: BodyParams<TextGenerationInput>): Record<string, unknown> {
48
+ return {
49
+ model: params.model,
50
+ ...omit(params.args, ["inputs", "parameters"]),
51
+ ...(params.args.parameters
52
+ ? {
53
+ max_tokens: (params.args.parameters as Record<string, unknown>).max_new_tokens,
54
+ ...omit(params.args.parameters as Record<string, unknown>, "max_new_tokens"),
55
+ }
56
+ : undefined),
57
+ prompt: params.args.inputs,
58
+ };
59
+ }
60
+
61
+ override async getResponse(response: OvhCloudTextCompletionOutput): Promise<TextGenerationOutput> {
62
+ if (
63
+ typeof response === "object" &&
64
+ "choices" in response &&
65
+ Array.isArray(response?.choices) &&
66
+ typeof response?.model === "string"
67
+ ) {
68
+ const completion = response.choices[0];
69
+ return {
70
+ generated_text: completion.text,
71
+ };
72
+ }
73
+ throw new InferenceOutputError("Expected OVHcloud text generation response format");
74
+ }
75
+ }
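
Note that `preparePayload` translates the task-style arguments into OVHcloud's OpenAI-compatible completion API: `inputs` is sent as `prompt`, and `max_new_tokens` is renamed to `max_tokens`. With the provider registered, requests go through the regular client. A minimal usage sketch, assuming a valid access token; the model ID is a hypothetical placeholder and must be one registered in the OVHcloud mapping linked above:

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // your Hugging Face access token

// Conversational task routed through OVHcloud (model ID is a placeholder).
const chat = await client.chatCompletion({
	provider: "ovhcloud",
	model: "meta-llama/Llama-3.1-8B-Instruct",
	messages: [{ role: "user", content: "Hello!" }],
});
console.log(chat.choices[0].message);

// Text generation: max_new_tokens is renamed to OVHcloud's max_tokens,
// and inputs becomes the prompt field, as preparePayload above shows.
const output = await client.textGeneration({
	provider: "ovhcloud",
	model: "meta-llama/Llama-3.1-8B-Instruct",
	inputs: "Once upon a time",
	parameters: { max_new_tokens: 64 },
});
console.log(output.generated_text);
```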
@@ -8,11 +8,11 @@ import {
8
8
  } from "@huggingface/tasks";
9
9
  import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
10
10
  import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
11
+ import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
12
+ import { getProviderHelper } from "../lib/getProviderHelper";
11
13
  import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions";
12
14
  import type { InferenceProvider, InferenceTask, RequestArgs } from "../types";
13
15
  import { templates } from "./templates.exported";
14
- import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
15
- import { getProviderHelper } from "../lib/getProviderHelper";
16
16
 
17
17
  export type InferenceSnippetOptions = { streaming?: boolean; billTo?: string } & Record<string, unknown>;
18
18
 
@@ -112,6 +112,7 @@ const HF_JS_METHODS: Partial<Record<WidgetType, string>> = {
112
112
  "text-generation": "textGeneration",
113
113
  "text2text-generation": "textGeneration",
114
114
  "token-classification": "tokenClassification",
115
+ "text-to-speech": "textToSpeech",
115
116
  translation: "translation",
116
117
  };
117
118
 
@@ -310,7 +311,7 @@ const snippets: Partial<
310
311
  "text-generation": snippetGenerator("basic"),
311
312
  "text-to-audio": snippetGenerator("textToAudio"),
312
313
  "text-to-image": snippetGenerator("textToImage"),
313
- "text-to-speech": snippetGenerator("textToAudio"),
314
+ "text-to-speech": snippetGenerator("textToSpeech"),
314
315
  "text-to-video": snippetGenerator("textToVideo"),
315
316
  "text2text-generation": snippetGenerator("basic"),
316
317
  "token-classification": snippetGenerator("basic"),
@@ -7,6 +7,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
7
7
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
8
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
9
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
10
+ "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
10
11
  "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %} },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
11
12
  },
12
13
  "huggingface.js": {
@@ -16,7 +17,8 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
16
17
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
17
18
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
18
19
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
19
- "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
20
+ "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
21
+ "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
20
22
  },
21
23
  "openai": {
22
24
  "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n{% if billTo %}\n\tdefaultHeaders: {\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \n\t}\n{% endif %}\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
@@ -25,7 +27,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
25
27
  },
26
28
  "python": {
27
29
  "fal_client": {
28
- "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
30
+ "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
29
31
  },
30
32
  "huggingface_hub": {
31
33
  "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
@@ -37,6 +39,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
37
39
  "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
38
40
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
39
41
  "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
42
+ "textToSpeech": "# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) \n",
40
43
  "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
41
44
  },
42
45
  "openai": {
@@ -53,8 +56,9 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
53
56
  "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
54
57
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
55
58
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
56
- "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
59
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
57
60
  "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
61
+ "textToSpeech": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
58
62
  "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
59
63
  "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
60
64
  }
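
Since the templates above are stored as escaped one-line strings, here is roughly what the new `js`/`fetch` `textToSpeech` template renders to for a transformers-library model; the URL, token, and input text are placeholders that the snippet generator fills in:

```ts
// Approximate rendered output of the new "textToSpeech" fetch template
// (transformers branch); "<fullUrl>" and the token are generator-filled values.
async function query(data: { text: string }): Promise<Blob> {
	const response = await fetch("<fullUrl>", {
		headers: {
			Authorization: "Bearer hf_xxx",
			"Content-Type": "application/json",
		},
		method: "POST",
		body: JSON.stringify(data),
	});
	// The endpoint returns raw audio bytes; keep them as a Blob.
	return await response.blob();
}

query({ text: "Hello world" }).then((audio) => {
	// Use the audio Blob directly, e.g. URL.createObjectURL(audio).
});
```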
package/src/types.ts CHANGED
@@ -51,6 +51,7 @@ export const INFERENCE_PROVIDERS = [
51
51
  "novita",
52
52
  "nscale",
53
53
  "openai",
54
+ "ovhcloud",
54
55
  "replicate",
55
56
  "sambanova",
56
57
  "together",