@huggingface/inference 4.7.0 → 4.7.1

This diff shows the contents of publicly available package versions as published to one of the supported registries, and is provided for informational purposes only.
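
In short, 4.7.1 bumps the `PACKAGE_VERSION` constants and adds inference-snippet support for the new `image-to-video` task: the task is wired into the snippet generator as `snippetGenerator("imageToVideo", prepareImageToImageInput)`, and matching `imageToVideo` templates are added for the plain-`fetch` JavaScript flavor, the `@huggingface/inference` client, and Python's `fal_client`, `huggingface_hub`, and `requests` flavors. The hunks below repeat the same change across the compiled CommonJS and ESM outputs, their declaration maps, `package.json`, and the TypeScript sources.

Based on the new client template, a generated snippet should look roughly like this sketch; the token, file name, provider, model id, and prompt below are placeholders, not values taken from this diff:

```ts
import fs from "node:fs";
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // hypothetical access token

// The template reads a local still image and passes it as `inputs`,
// mirroring the existing imageToImage snippet.
const data = fs.readFileSync("cat.png"); // hypothetical input file

const video = await client.imageToVideo({
  provider: "fal-ai", // example provider
  model: "some-org/some-image-to-video-model", // hypothetical model id
  inputs: data,
  parameters: { prompt: "The cat starts to dance" }, // hypothetical prompt
});
// Per the template's trailing comment, `video` is a Blob: save it to a
// file or hand it to a <video> element.
```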
@@ -1,3 +1,3 @@
- export declare const PACKAGE_VERSION = "4.7.0";
+ export declare const PACKAGE_VERSION = "4.7.1";
  export declare const PACKAGE_NAME = "@huggingface/inference";
  //# sourceMappingURL=package.d.ts.map
@@ -2,5 +2,5 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.PACKAGE_NAME = exports.PACKAGE_VERSION = void 0;
  // Generated file from package.json. Issues importing JSON directly when publishing on commonjs/ESM - see https://github.com/microsoft/TypeScript/issues/51783
- exports.PACKAGE_VERSION = "4.7.0";
+ exports.PACKAGE_VERSION = "4.7.1";
  exports.PACKAGE_NAME = "@huggingface/inference";
@@ -1 +1 @@
- {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAK5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,yBAAyB,EAA8B,MAAM,aAAa,CAAC;AAKxH,MAAM,MAAM,uBAAuB,GAAG;IACrC,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAkY5B,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,QAAQ,EAAE,yBAAyB,EACnC,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAK5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,yBAAyB,EAA8B,MAAM,aAAa,CAAC;AAKxH,MAAM,MAAM,uBAAuB,GAAG;IACrC,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAmY5B,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,QAAQ,EAAE,yBAAyB,EACnC,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -278,6 +278,7 @@ const snippets = {
  "image-text-to-text": snippetGenerator("conversational"),
  "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
  "image-to-text": snippetGenerator("basicImage"),
+ "image-to-video": snippetGenerator("imageToVideo", prepareImageToImageInput),
  "object-detection": snippetGenerator("basicImage"),
  "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
  "sentence-similarity": snippetGenerator("basic"),
@@ -1 +1 @@
- {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAgFnE,CAAC"}
+ {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAqFnE,CAAC"}
@@ -10,6 +10,7 @@ exports.templates = {
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "conversational": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n{{ autoInputs.asTsString }}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "imageToImage": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"inputs\": `data:image/png;base64,${data.inputs.encode(\"base64\")}`,\n\t\t\t\t\"parameters\": data.parameters,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n\tinputs: image,\n\tparameters: {\n\t\tprompt: \"{{ inputs.asObj.parameters.prompt }}\",\n\t}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
+ "imageToVideo": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"image_url\": `data:image/png;base64,${data.image.encode(\"base64\")}`,\n\t\t\t\t\"prompt\": data.prompt,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({\n\t\"image\": image,\n\t\"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n}).then((response) => {\n // Use video\n});",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
  "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
@@ -22,6 +23,7 @@ exports.templates = {
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
  "imageToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst image = await client.imageToImage({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)\n// For example, you can save it to a file or display it in an image element\n",
+ "imageToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst video = await client.imageToVideo({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\n/// Use the generated video (it's a Blob)\n// For example, you can save it to a file or display it in a video element\n",
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
  "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
  "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
@@ -34,6 +36,7 @@ exports.templates = {
  "python": {
  "fal_client": {
  "imageToImage": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"fal-ai/flux-kontext/dev\",\n arguments={\n \"prompt\": f\"data:image/png;base64,{image_base_64}\",\n \"image_url\": \"{{ providerInputs.asObj.inputs }}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
+ "imageToVideo": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"{{model.id}}\",\n arguments={\n \"image_url\": f\"data:image/png;base64,{image_base_64}\",\n \"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
  "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
  },
  "huggingface_hub": {
@@ -43,7 +46,8 @@ exports.templates = {
  "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
  "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
  "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
- "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
+ "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n)\n",
+ "imageToVideo": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\nvideo = client.image_to_video(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) \n",
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n{% if endpointUrl %}\n base_url=\"{{ baseUrl }}\",\n{% endif %}\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
  "questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
  "tableQuestionAnswering": "answer = client.table_question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
@@ -62,7 +66,8 @@ exports.templates = {
  "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ autoInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
  "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ autoInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
  "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
- "imageToImage": "with open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToImage": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToVideo": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nvideo_bytes = query({\n{{ inputs.asJsonString }}\n})\n",
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
@@ -1,3 +1,3 @@
- export declare const PACKAGE_VERSION = "4.7.0";
+ export declare const PACKAGE_VERSION = "4.7.1";
  export declare const PACKAGE_NAME = "@huggingface/inference";
  //# sourceMappingURL=package.d.ts.map
@@ -1,3 +1,3 @@
  // Generated file from package.json. Issues importing JSON directly when publishing on commonjs/ESM - see https://github.com/microsoft/TypeScript/issues/51783
- export const PACKAGE_VERSION = "4.7.0";
+ export const PACKAGE_VERSION = "4.7.1";
  export const PACKAGE_NAME = "@huggingface/inference";
@@ -1 +1 @@
- {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAK5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,yBAAyB,EAA8B,MAAM,aAAa,CAAC;AAKxH,MAAM,MAAM,uBAAuB,GAAG;IACrC,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAkY5B,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,QAAQ,EAAE,yBAAyB,EACnC,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
+ {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAK5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,yBAAyB,EAA8B,MAAM,aAAa,CAAC;AAKxH,MAAM,MAAM,uBAAuB,GAAG;IACrC,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAmY5B,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,QAAQ,EAAE,yBAAyB,EACnC,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -275,6 +275,7 @@ const snippets = {
  "image-text-to-text": snippetGenerator("conversational"),
  "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
  "image-to-text": snippetGenerator("basicImage"),
+ "image-to-video": snippetGenerator("imageToVideo", prepareImageToImageInput),
  "object-detection": snippetGenerator("basicImage"),
  "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
  "sentence-similarity": snippetGenerator("basic"),
@@ -1 +1 @@
- {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAgFnE,CAAC"}
+ {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAqFnE,CAAC"}
@@ -7,6 +7,7 @@ export const templates = {
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "conversational": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n{{ autoInputs.asTsString }}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "imageToImage": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"inputs\": `data:image/png;base64,${data.inputs.encode(\"base64\")}`,\n\t\t\t\t\"parameters\": data.parameters,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n\tinputs: image,\n\tparameters: {\n\t\tprompt: \"{{ inputs.asObj.parameters.prompt }}\",\n\t}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
+ "imageToVideo": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"image_url\": `data:image/png;base64,${data.image.encode(\"base64\")}`,\n\t\t\t\t\"prompt\": data.prompt,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({\n\t\"image\": image,\n\t\"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n}).then((response) => {\n // Use video\n});",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
  "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
@@ -19,6 +20,7 @@ export const templates = {
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
  "imageToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst image = await client.imageToImage({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)\n// For example, you can save it to a file or display it in an image element\n",
+ "imageToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst video = await client.imageToVideo({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\n/// Use the generated video (it's a Blob)\n// For example, you can save it to a file or display it in a video element\n",
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
  "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
  "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
@@ -31,6 +33,7 @@ export const templates = {
  "python": {
  "fal_client": {
  "imageToImage": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"fal-ai/flux-kontext/dev\",\n arguments={\n \"prompt\": f\"data:image/png;base64,{image_base_64}\",\n \"image_url\": \"{{ providerInputs.asObj.inputs }}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
+ "imageToVideo": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"{{model.id}}\",\n arguments={\n \"image_url\": f\"data:image/png;base64,{image_base_64}\",\n \"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
  "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
  },
  "huggingface_hub": {
@@ -40,7 +43,8 @@ export const templates = {
  "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
  "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
  "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
- "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
+ "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n)\n",
+ "imageToVideo": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\nvideo = client.image_to_video(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) \n",
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n{% if endpointUrl %}\n base_url=\"{{ baseUrl }}\",\n{% endif %}\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
  "questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
  "tableQuestionAnswering": "answer = client.table_question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
@@ -59,7 +63,8 @@ export const templates = {
  "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ autoInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
  "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ autoInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
  "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
- "imageToImage": "with open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToImage": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToVideo": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nvideo_bytes = query({\n{{ inputs.asJsonString }}\n})\n",
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@huggingface/inference",
- "version": "4.7.0",
+ "version": "4.7.1",
  "license": "MIT",
  "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
  "description": "Typescript client for the Hugging Face Inference Providers and Inference Endpoints",
@@ -40,8 +40,8 @@
  },
  "type": "module",
  "dependencies": {
- "@huggingface/jinja": "^0.5.1",
- "@huggingface/tasks": "^0.19.35"
+ "@huggingface/tasks": "^0.19.35",
+ "@huggingface/jinja": "^0.5.1"
  },
  "devDependencies": {
  "@types/node": "18.13.0"
package/src/package.ts CHANGED
@@ -1,3 +1,3 @@
  // Generated file from package.json. Issues importing JSON directly when publishing on commonjs/ESM - see https://github.com/microsoft/TypeScript/issues/51783
- export const PACKAGE_VERSION = "4.7.0";
+ export const PACKAGE_VERSION = "4.7.1";
  export const PACKAGE_NAME = "@huggingface/inference";
package/src/snippets/getInferenceSnippets.ts CHANGED
@@ -389,6 +389,7 @@ const snippets: Partial<
  "image-text-to-text": snippetGenerator("conversational"),
  "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
  "image-to-text": snippetGenerator("basicImage"),
+ "image-to-video": snippetGenerator("imageToVideo", prepareImageToImageInput),
  "object-detection": snippetGenerator("basicImage"),
  "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
  "sentence-similarity": snippetGenerator("basic"),
@@ -7,6 +7,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
  "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "conversational": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n{{ autoInputs.asTsString }}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
  "imageToImage": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"inputs\": `data:image/png;base64,${data.inputs.encode(\"base64\")}`,\n\t\t\t\t\"parameters\": data.parameters,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ \n\tinputs: image,\n\tparameters: {\n\t\tprompt: \"{{ inputs.asObj.parameters.prompt }}\",\n\t}\n}).then((response) => {\n console.log(JSON.stringify(response));\n});",
+ "imageToVideo": "const image = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: {\n\t\t\t\t\"image_url\": `data:image/png;base64,${data.image.encode(\"base64\")}`,\n\t\t\t\t\"prompt\": data.prompt,\n\t\t\t}\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({\n\t\"image\": image,\n\t\"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n}).then((response) => {\n // Use video\n});",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
  "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
  "textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
@@ -19,6 +20,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
  "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
  "imageToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst image = await client.imageToImage({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)\n// For example, you can save it to a file or display it in an image element\n",
+ "imageToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync(\"{{inputs.asObj.inputs}}\");\n\nconst video = await client.imageToVideo({\n{% if endpointUrl %}\n\tendpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n\tprovider: \"{{provider}}\",\n\tmodel: \"{{model.id}}\",\n\tinputs: data,\n\tparameters: { prompt: \"{{inputs.asObj.parameters.prompt}}\", },\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\n/// Use the generated video (it's a Blob)\n// For example, you can save it to a file or display it in a video element\n",
  "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
  "textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
  "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n{% if endpointUrl %}\n endpointUrl: \"{{ endpointUrl }}\",\n{% endif %}\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
@@ -31,6 +33,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
  "python": {
  "fal_client": {
  "imageToImage": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"fal-ai/flux-kontext/dev\",\n arguments={\n \"prompt\": f\"data:image/png;base64,{image_base_64}\",\n \"image_url\": \"{{ providerInputs.asObj.inputs }}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
+ "imageToVideo": "{%if provider == \"fal-ai\" %}\nimport fal_client\nimport base64\n\ndef on_queue_update(update):\n if isinstance(update, fal_client.InProgress):\n for log in update.logs:\n print(log[\"message\"])\n\nwith open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\nresult = fal_client.subscribe(\n \"{{model.id}}\",\n arguments={\n \"image_url\": f\"data:image/png;base64,{image_base_64}\",\n \"prompt\": \"{{inputs.asObj.parameters.prompt}}\",\n },\n with_logs=True,\n on_queue_update=on_queue_update,\n)\nprint(result)\n{%endif%}\n",
  "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
  },
  "huggingface_hub": {
@@ -40,7 +43,8 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
  "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
  "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
  "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
- "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
+ "imageToImage": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\n# output is a PIL.Image object\nimage = client.image_to_image(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n)\n",
+ "imageToVideo": "with open(\"{{ inputs.asObj.inputs }}\", \"rb\") as image_file:\n input_image = image_file.read()\n\nvideo = client.image_to_video(\n input_image,\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) \n",
  "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n{% if endpointUrl %}\n base_url=\"{{ baseUrl }}\",\n{% endif %}\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
  "questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
  "tableQuestionAnswering": "answer = client.table_question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
@@ -59,7 +63,8 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
  "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ autoInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
  "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ autoInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
  "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
- "imageToImage": "with open(\"{{inputs.asObj.inputs}}\", \"rb\") as image_file:\n image_base_64 = base64.b64encode(image_file.read()).decode('utf-8')\n\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToImage": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
+ "imageToVideo": "\ndef query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nvideo_bytes = query({\n{{ inputs.asJsonString }}\n})\n",
  "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
  "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
  "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",