@huggingface/inference 4.0.6 → 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/commonjs/lib/getProviderHelper.d.ts.map +1 -1
  2. package/dist/commonjs/lib/getProviderHelper.js +2 -0
  3. package/dist/commonjs/package.d.ts +1 -1
  4. package/dist/commonjs/package.js +1 -1
  5. package/dist/commonjs/providers/fal-ai.d.ts +16 -2
  6. package/dist/commonjs/providers/fal-ai.d.ts.map +1 -1
  7. package/dist/commonjs/providers/fal-ai.js +93 -43
  8. package/dist/commonjs/providers/replicate.d.ts +8 -2
  9. package/dist/commonjs/providers/replicate.d.ts.map +1 -1
  10. package/dist/commonjs/providers/replicate.js +46 -1
  11. package/dist/commonjs/snippets/templates.exported.d.ts.map +1 -1
  12. package/dist/commonjs/snippets/templates.exported.js +5 -2
  13. package/dist/commonjs/tasks/cv/imageToImage.d.ts.map +1 -1
  14. package/dist/commonjs/tasks/cv/imageToImage.js +3 -1
  15. package/dist/esm/lib/getProviderHelper.d.ts.map +1 -1
  16. package/dist/esm/lib/getProviderHelper.js +2 -0
  17. package/dist/esm/package.d.ts +1 -1
  18. package/dist/esm/package.js +1 -1
  19. package/dist/esm/providers/fal-ai.d.ts +16 -2
  20. package/dist/esm/providers/fal-ai.d.ts.map +1 -1
  21. package/dist/esm/providers/fal-ai.js +91 -42
  22. package/dist/esm/providers/replicate.d.ts +8 -2
  23. package/dist/esm/providers/replicate.d.ts.map +1 -1
  24. package/dist/esm/providers/replicate.js +45 -1
  25. package/dist/esm/snippets/templates.exported.d.ts.map +1 -1
  26. package/dist/esm/snippets/templates.exported.js +5 -2
  27. package/dist/esm/tasks/cv/imageToImage.d.ts.map +1 -1
  28. package/dist/esm/tasks/cv/imageToImage.js +3 -1
  29. package/package.json +2 -2
  30. package/src/lib/getProviderHelper.ts +2 -0
  31. package/src/package.ts +1 -1
  32. package/src/providers/fal-ai.ts +132 -57
  33. package/src/providers/replicate.ts +63 -2
  34. package/src/snippets/templates.exported.ts +5 -2
  35. package/src/tasks/cv/imageToImage.ts +3 -1
@@ -16,9 +16,16 @@
16
16
  */
17
17
  import { InferenceClientProviderOutputError } from "../errors.js";
18
18
  import { isUrl } from "../lib/isUrl.js";
19
- import type { BodyParams, HeaderParams, UrlParams } from "../types.js";
19
+ import type { BodyParams, HeaderParams, RequestArgs, UrlParams } from "../types.js";
20
20
  import { omit } from "../utils/omit.js";
21
- import { TaskProviderHelper, type TextToImageTaskHelper, type TextToVideoTaskHelper } from "./providerHelper.js";
21
+ import {
22
+ TaskProviderHelper,
23
+ type ImageToImageTaskHelper,
24
+ type TextToImageTaskHelper,
25
+ type TextToVideoTaskHelper,
26
+ } from "./providerHelper.js";
27
+ import type { ImageToImageArgs } from "../tasks/cv/imageToImage.js";
28
+ import { base64FromBytes } from "../utils/base64FromBytes.js";
22
29
  export interface ReplicateOutput {
23
30
  output?: string | string[];
24
31
  }
@@ -152,3 +159,57 @@ export class ReplicateTextToVideoTask extends ReplicateTask implements TextToVid
152
159
  throw new InferenceClientProviderOutputError("Received malformed response from Replicate text-to-video API");
153
160
  }
154
161
  }
162
+
163
+ export class ReplicateImageToImageTask extends ReplicateTask implements ImageToImageTaskHelper {
164
+ override preparePayload(params: BodyParams<ImageToImageArgs>): Record<string, unknown> {
165
+ return {
166
+ input: {
167
+ ...omit(params.args, ["inputs", "parameters"]),
168
+ ...params.args.parameters,
169
+ input_image: params.args.inputs, // This will be processed in preparePayloadAsync
170
+ },
171
+ version: params.model.includes(":") ? params.model.split(":")[1] : undefined,
172
+ };
173
+ }
174
+
175
+ async preparePayloadAsync(args: ImageToImageArgs): Promise<RequestArgs> {
176
+ const { inputs, ...restArgs } = args;
177
+
178
+ // Convert Blob to base64 data URL
179
+ const bytes = new Uint8Array(await inputs.arrayBuffer());
180
+ const base64 = base64FromBytes(bytes);
181
+ const imageInput = `data:${inputs.type || "image/jpeg"};base64,${base64}`;
182
+
183
+ return {
184
+ ...restArgs,
185
+ inputs: imageInput,
186
+ };
187
+ }
188
+
189
+ override async getResponse(response: ReplicateOutput): Promise<Blob> {
190
+ if (
191
+ typeof response === "object" &&
192
+ !!response &&
193
+ "output" in response &&
194
+ Array.isArray(response.output) &&
195
+ response.output.length > 0 &&
196
+ typeof response.output[0] === "string"
197
+ ) {
198
+ const urlResponse = await fetch(response.output[0]);
199
+ return await urlResponse.blob();
200
+ }
201
+
202
+ if (
203
+ typeof response === "object" &&
204
+ !!response &&
205
+ "output" in response &&
206
+ typeof response.output === "string" &&
207
+ isUrl(response.output)
208
+ ) {
209
+ const urlResponse = await fetch(response.output);
210
+ return await urlResponse.blob();
211
+ }
212
+
213
+ throw new InferenceClientProviderOutputError("Received malformed response from Replicate image-to-image API");
214
+ }
215
+ }
@@ -5,6 +5,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
5
5
  "basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
6
6
  "basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
7
7
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
+ "imageToImage": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify({\n\t\t\t\t\"inputs\": `data:image/png;base64,${data.inputs.toString(\"base64\")}`,\n\t\t\t\t\"parameters\": data.parameters,\n\t\t\t})\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n\tinputs: image,\n\tparameters: {\n\t\tprompt: \"{{ inputs.asObj.parameters.prompt }}\",\n\t}\n}).then((response) => {\n    console.log(JSON.stringify(response));\n});",
8
9
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
10
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
10
11
  "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
@@ -16,6 +17,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
16
17
  "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
17
18
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
18
19
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
20
+ "imageToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst image = await client.imageToImage({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)\n// For example, you can save it to a file or display it in an image element\n",
19
21
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
20
22
  "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
21
23
  "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
@@ -27,6 +29,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
27
29
  },
28
30
  "python": {
29
31
  "fal_client": {
32
+ "imageToImage": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": \"{{ inputs.asObj.parameters.prompt }}\",\n \"image_url\": f\"data:image/png;base64,{image_base_64}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
30
33
  "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
31
34
  },
32
35
  "huggingface_hub": {
@@ -36,7 +39,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
36
39
  "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
37
40
  "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
38
41
  "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
39
- "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
42
+ "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
40
43
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n{% if endpointUrl %}\n base_url=\"{{ baseUrl }}\",\n{% endif %}\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
41
44
  "questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
42
45
  "tableQuestionAnswering": "answer = client.table_question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
@@ -55,7 +58,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
55
58
  "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
56
59
  "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
57
60
  "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
58
- "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
61
+ "imageToImage": "with open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
59
62
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
60
63
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
61
64
  "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
@@ -3,6 +3,7 @@ import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
3
3
  import { getProviderHelper } from "../../lib/getProviderHelper.js";
4
4
  import type { BaseArgs, Options } from "../../types.js";
5
5
  import { innerRequest } from "../../utils/request.js";
6
+ import { makeRequestOptions } from "../../lib/makeRequestOptions.js";
6
7
 
7
8
  export type ImageToImageArgs = BaseArgs & ImageToImageInput;
8
9
 
@@ -18,5 +19,6 @@ export async function imageToImage(args: ImageToImageArgs, options?: Options): P
18
19
  ...options,
19
20
  task: "image-to-image",
20
21
  });
21
- return providerHelper.getResponse(res);
22
+ const { url, info } = await makeRequestOptions(args, providerHelper, { ...options, task: "image-to-image" });
23
+ return providerHelper.getResponse(res, url, info.headers as Record<string, string>);
22
24
  }