@huggingface/inference 3.6.0 → 3.6.1

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (57)
  1. package/dist/index.cjs +116 -28
  2. package/dist/index.js +116 -17
  3. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
  4. package/dist/src/snippets/templates.exported.d.ts +2 -0
  5. package/dist/src/snippets/templates.exported.d.ts.map +1 -0
  6. package/package.json +9 -14
  7. package/src/snippets/getInferenceSnippets.ts +6 -24
  8. package/src/snippets/templates.exported.ts +72 -0
  9. package/dist/browser/index.cjs +0 -1652
  10. package/dist/browser/index.js +0 -1652
  11. package/src/snippets/templates/js/fetch/basic.jinja +0 -19
  12. package/src/snippets/templates/js/fetch/basicAudio.jinja +0 -19
  13. package/src/snippets/templates/js/fetch/basicImage.jinja +0 -19
  14. package/src/snippets/templates/js/fetch/textToAudio.jinja +0 -41
  15. package/src/snippets/templates/js/fetch/textToImage.jinja +0 -19
  16. package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +0 -22
  17. package/src/snippets/templates/js/huggingface.js/basic.jinja +0 -11
  18. package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +0 -13
  19. package/src/snippets/templates/js/huggingface.js/basicImage.jinja +0 -13
  20. package/src/snippets/templates/js/huggingface.js/conversational.jinja +0 -11
  21. package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +0 -19
  22. package/src/snippets/templates/js/huggingface.js/textToImage.jinja +0 -11
  23. package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +0 -10
  24. package/src/snippets/templates/js/openai/conversational.jinja +0 -13
  25. package/src/snippets/templates/js/openai/conversationalStream.jinja +0 -22
  26. package/src/snippets/templates/python/fal_client/textToImage.jinja +0 -11
  27. package/src/snippets/templates/python/huggingface_hub/basic.jinja +0 -4
  28. package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +0 -1
  29. package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +0 -1
  30. package/src/snippets/templates/python/huggingface_hub/conversational.jinja +0 -6
  31. package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +0 -8
  32. package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +0 -5
  33. package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +0 -6
  34. package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +0 -6
  35. package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +0 -5
  36. package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +0 -4
  37. package/src/snippets/templates/python/openai/conversational.jinja +0 -13
  38. package/src/snippets/templates/python/openai/conversationalStream.jinja +0 -15
  39. package/src/snippets/templates/python/requests/basic.jinja +0 -7
  40. package/src/snippets/templates/python/requests/basicAudio.jinja +0 -7
  41. package/src/snippets/templates/python/requests/basicImage.jinja +0 -7
  42. package/src/snippets/templates/python/requests/conversational.jinja +0 -9
  43. package/src/snippets/templates/python/requests/conversationalStream.jinja +0 -16
  44. package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +0 -13
  45. package/src/snippets/templates/python/requests/imageToImage.jinja +0 -15
  46. package/src/snippets/templates/python/requests/importRequests.jinja +0 -10
  47. package/src/snippets/templates/python/requests/tabular.jinja +0 -9
  48. package/src/snippets/templates/python/requests/textToAudio.jinja +0 -23
  49. package/src/snippets/templates/python/requests/textToImage.jinja +0 -14
  50. package/src/snippets/templates/python/requests/zeroShotClassification.jinja +0 -8
  51. package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +0 -14
  52. package/src/snippets/templates/sh/curl/basic.jinja +0 -7
  53. package/src/snippets/templates/sh/curl/basicAudio.jinja +0 -5
  54. package/src/snippets/templates/sh/curl/basicImage.jinja +0 -5
  55. package/src/snippets/templates/sh/curl/conversational.jinja +0 -7
  56. package/src/snippets/templates/sh/curl/conversationalStream.jinja +0 -7
  57. package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +0 -5
package/dist/index.cjs CHANGED
@@ -1,9 +1,7 @@
1
1
  "use strict";
2
- var __create = Object.create;
3
2
  var __defProp = Object.defineProperty;
4
3
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
4
  var __getOwnPropNames = Object.getOwnPropertyNames;
6
- var __getProtoOf = Object.getPrototypeOf;
7
5
  var __hasOwnProp = Object.prototype.hasOwnProperty;
8
6
  var __export = (target, all) => {
9
7
  for (var name2 in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
17
15
  }
18
16
  return to;
19
17
  };
20
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
- // If the importer is in node compatibility mode or this is not an ESM
22
- // file that has been converted to a CommonJS file using a Babel-
23
- // compatible transform (i.e. "__esModule" has not been set), then set
24
- // "default" to the CommonJS "module.exports" for node compatibility.
25
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
- mod
27
- ));
28
18
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
19
 
30
20
  // src/index.ts
@@ -441,7 +431,7 @@ function isUrl(modelOrUrl) {
441
431
 
442
432
  // package.json
443
433
  var name = "@huggingface/inference";
444
- var version = "3.6.0";
434
+ var version = "3.6.1";
445
435
 
446
436
  // src/providers/consts.ts
447
437
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -1673,10 +1663,116 @@ __export(snippets_exports, {
1673
1663
  // src/snippets/getInferenceSnippets.ts
1674
1664
  var import_tasks = require("@huggingface/tasks");
1675
1665
  var import_jinja = require("@huggingface/jinja");
1676
- var import_fs = __toESM(require("fs"), 1);
1677
- var import_path = __toESM(require("path"), 1);
1678
- var import_node_fs = require("fs");
1679
- var import_meta = {};
1666
+
1667
+ // src/snippets/templates.exported.ts
1668
+ var templates = {
1669
+ "js": {
1670
+ "fetch": {
1671
+ "basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1672
+ "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1673
+ "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1674
+ "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
1675
+ "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
1676
+ "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
1677
+ },
1678
+ "huggingface.js": {
1679
+ "basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1680
+ "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1681
+ "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1682
+ "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1683
+ "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = await client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}',
1684
+ "textToImage": `import { InferenceClient } from "@huggingface/inference";
1685
+
1686
+ const client = new InferenceClient("{{ accessToken }}");
1687
+
1688
+ const image = await client.textToImage({
1689
+ provider: "{{ provider }}",
1690
+ model: "{{ model.id }}",
1691
+ inputs: {{ inputs.asObj.inputs }},
1692
+ parameters: { num_inference_steps: 5 },
1693
+ });
1694
+ /// Use the generated image (it's a Blob)`,
1695
+ "textToVideo": `import { InferenceClient } from "@huggingface/inference";
1696
+
1697
+ const client = new InferenceClient("{{ accessToken }}");
1698
+
1699
+ const image = await client.textToVideo({
1700
+ provider: "{{ provider }}",
1701
+ model: "{{ model.id }}",
1702
+ inputs: {{ inputs.asObj.inputs }},
1703
+ });
1704
+ // Use the generated video (it's a Blob)`
1705
+ },
1706
+ "openai": {
1707
+ "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1708
+ "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nlet out = "";\n\nconst stream = await client.chat.completions.create({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}'
1709
+ }
1710
+ },
1711
+ "python": {
1712
+ "fal_client": {
1713
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
1714
+ },
1715
+ "huggingface_hub": {
1716
+ "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
1717
+ "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1718
+ "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1719
+ "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1720
+ "conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
1721
+ "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
1722
+ "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
1723
+ "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n)',
1724
+ "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
1725
+ "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
1726
+ },
1727
+ "openai": {
1728
+ "conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1729
+ "conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
1730
+ },
1731
+ "requests": {
1732
+ "basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
1733
+ "basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1734
+ "basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1735
+ "conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
1736
+ "conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
1737
+ "documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
1738
+ "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
1739
+ "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {"Authorization": "{{ authorizationHeader }}"}',
1740
+ "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
1741
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
1742
+ "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
1743
+ "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
1744
+ "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
1745
+ }
1746
+ },
1747
+ "sh": {
1748
+ "curl": {
1749
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
1750
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1751
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1752
+ "conversational": `curl {{ fullUrl }} \\
1753
+ -H 'Authorization: {{ authorizationHeader }}' \\
1754
+ -H 'Content-Type: application/json' \\
1755
+ -d '{
1756
+ {{ providerInputs.asCurlString }},
1757
+ "stream": false
1758
+ }'`,
1759
+ "conversationalStream": `curl {{ fullUrl }} \\
1760
+ -H 'Authorization: {{ authorizationHeader }}' \\
1761
+ -H 'Content-Type: application/json' \\
1762
+ -d '{
1763
+ {{ providerInputs.asCurlString }},
1764
+ "stream": true
1765
+ }'`,
1766
+ "zeroShotClassification": `curl {{ fullUrl }} \\
1767
+ -X POST \\
1768
+ -d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
1769
+ -H 'Content-Type: application/json' \\
1770
+ -H 'Authorization: {{ authorizationHeader }}'`
1771
+ }
1772
+ }
1773
+ };
1774
+
1775
+ // src/snippets/getInferenceSnippets.ts
1680
1776
  var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
1681
1777
  var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
1682
1778
  var SH_CLIENTS = ["curl"];
@@ -1685,20 +1781,12 @@ var CLIENTS = {
1685
1781
  python: [...PYTHON_CLIENTS],
1686
1782
  sh: [...SH_CLIENTS]
1687
1783
  };
1688
- var rootDirFinder = () => {
1689
- let currentPath = typeof import_meta !== "undefined" && import_meta.url ? import_path.default.normalize(new URL(import_meta.url).pathname) : __dirname;
1690
- while (currentPath !== "/") {
1691
- if ((0, import_node_fs.existsSync)(import_path.default.join(currentPath, "package.json"))) {
1692
- return currentPath;
1693
- }
1694
- currentPath = import_path.default.normalize(import_path.default.join(currentPath, ".."));
1695
- }
1696
- return "/";
1697
- };
1698
- var templatePath = (language, client, templateName) => import_path.default.join(rootDirFinder(), "src", "snippets", "templates", language, client, `${templateName}.jinja`);
1699
- var hasTemplate = (language, client, templateName) => (0, import_node_fs.existsSync)(templatePath(language, client, templateName));
1784
+ var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
1700
1785
  var loadTemplate = (language, client, templateName) => {
1701
- const template = import_fs.default.readFileSync(templatePath(language, client, templateName), "utf8");
1786
+ const template = templates[language]?.[client]?.[templateName];
1787
+ if (!template) {
1788
+ throw new Error(`Template not found: ${language}/${client}/${templateName}`);
1789
+ }
1702
1790
  return (data) => new import_jinja.Template(template).render({ ...data });
1703
1791
  };
1704
1792
  var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
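In short, the compiled CJS bundle drops the filesystem-based template resolution (rootDirFinder, templatePath, fs.readFileSync) in favor of a lookup into the inlined templates record: hasTemplate becomes a key-existence check and loadTemplate throws when the key is missing. Below is a minimal TypeScript sketch of that lookup path, assuming a relative import of the generated templates.exported module; the sample language/client/template names and render values are illustrative only.

```ts
import { Template } from "@huggingface/jinja";
import { templates } from "./templates.exported";

// A template exists iff the nested record has an entry for language/client/name.
const hasTemplate = (language: string, client: string, templateName: string): boolean =>
	templates[language]?.[client]?.[templateName] !== undefined;

// Resolve the template string from the record and wrap it in a Jinja renderer.
const loadTemplate = (language: string, client: string, templateName: string) => {
	const template = templates[language]?.[client]?.[templateName];
	if (!template) {
		throw new Error(`Template not found: ${language}/${client}/${templateName}`);
	}
	return (data: Record<string, unknown>): string => new Template(template).render({ ...data });
};

// Illustrative usage: render the huggingface_hub client-import snippet.
if (hasTemplate("python", "huggingface_hub", "importInferenceClient")) {
	const render = loadTemplate("python", "huggingface_hub", "importInferenceClient");
	console.log(render({ provider: "hf-inference", accessToken: "hf_xxx" })); // placeholder values
}
```

The ESM build (package/dist/index.js, next) receives the same change.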
package/dist/index.js CHANGED
@@ -374,7 +374,7 @@ function isUrl(modelOrUrl) {
374
374
 
375
375
  // package.json
376
376
  var name = "@huggingface/inference";
377
- var version = "3.6.0";
377
+ var version = "3.6.1";
378
378
 
379
379
  // src/providers/consts.ts
380
380
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -1609,9 +1609,116 @@ import {
1609
1609
  getModelInputSnippet
1610
1610
  } from "@huggingface/tasks";
1611
1611
  import { Template } from "@huggingface/jinja";
1612
- import fs from "fs";
1613
- import path from "path";
1614
- import { existsSync as pathExists } from "node:fs";
1612
+
1613
+ // src/snippets/templates.exported.ts
1614
+ var templates = {
1615
+ "js": {
1616
+ "fetch": {
1617
+ "basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1618
+ "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1619
+ "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1620
+ "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
1621
+ "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
1622
+ "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
1623
+ },
1624
+ "huggingface.js": {
1625
+ "basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1626
+ "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1627
+ "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1628
+ "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1629
+ "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = await client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}',
1630
+ "textToImage": `import { InferenceClient } from "@huggingface/inference";
1631
+
1632
+ const client = new InferenceClient("{{ accessToken }}");
1633
+
1634
+ const image = await client.textToImage({
1635
+ provider: "{{ provider }}",
1636
+ model: "{{ model.id }}",
1637
+ inputs: {{ inputs.asObj.inputs }},
1638
+ parameters: { num_inference_steps: 5 },
1639
+ });
1640
+ /// Use the generated image (it's a Blob)`,
1641
+ "textToVideo": `import { InferenceClient } from "@huggingface/inference";
1642
+
1643
+ const client = new InferenceClient("{{ accessToken }}");
1644
+
1645
+ const image = await client.textToVideo({
1646
+ provider: "{{ provider }}",
1647
+ model: "{{ model.id }}",
1648
+ inputs: {{ inputs.asObj.inputs }},
1649
+ });
1650
+ // Use the generated video (it's a Blob)`
1651
+ },
1652
+ "openai": {
1653
+ "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1654
+ "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nlet out = "";\n\nconst stream = await client.chat.completions.create({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}'
1655
+ }
1656
+ },
1657
+ "python": {
1658
+ "fal_client": {
1659
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
1660
+ },
1661
+ "huggingface_hub": {
1662
+ "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
1663
+ "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1664
+ "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1665
+ "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1666
+ "conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
1667
+ "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
1668
+ "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
1669
+ "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n)',
1670
+ "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
1671
+ "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
1672
+ },
1673
+ "openai": {
1674
+ "conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1675
+ "conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
1676
+ },
1677
+ "requests": {
1678
+ "basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
1679
+ "basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1680
+ "basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1681
+ "conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
1682
+ "conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
1683
+ "documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
1684
+ "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
1685
+ "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {"Authorization": "{{ authorizationHeader }}"}',
1686
+ "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
1687
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
1688
+ "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
1689
+ "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
1690
+ "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
1691
+ }
1692
+ },
1693
+ "sh": {
1694
+ "curl": {
1695
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
1696
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1697
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1698
+ "conversational": `curl {{ fullUrl }} \\
1699
+ -H 'Authorization: {{ authorizationHeader }}' \\
1700
+ -H 'Content-Type: application/json' \\
1701
+ -d '{
1702
+ {{ providerInputs.asCurlString }},
1703
+ "stream": false
1704
+ }'`,
1705
+ "conversationalStream": `curl {{ fullUrl }} \\
1706
+ -H 'Authorization: {{ authorizationHeader }}' \\
1707
+ -H 'Content-Type: application/json' \\
1708
+ -d '{
1709
+ {{ providerInputs.asCurlString }},
1710
+ "stream": true
1711
+ }'`,
1712
+ "zeroShotClassification": `curl {{ fullUrl }} \\
1713
+ -X POST \\
1714
+ -d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
1715
+ -H 'Content-Type: application/json' \\
1716
+ -H 'Authorization: {{ authorizationHeader }}'`
1717
+ }
1718
+ }
1719
+ };
1720
+
1721
+ // src/snippets/getInferenceSnippets.ts
1615
1722
  var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
1616
1723
  var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
1617
1724
  var SH_CLIENTS = ["curl"];
@@ -1620,20 +1727,12 @@ var CLIENTS = {
1620
1727
  python: [...PYTHON_CLIENTS],
1621
1728
  sh: [...SH_CLIENTS]
1622
1729
  };
1623
- var rootDirFinder = () => {
1624
- let currentPath = typeof import.meta !== "undefined" && import.meta.url ? path.normalize(new URL(import.meta.url).pathname) : __dirname;
1625
- while (currentPath !== "/") {
1626
- if (pathExists(path.join(currentPath, "package.json"))) {
1627
- return currentPath;
1628
- }
1629
- currentPath = path.normalize(path.join(currentPath, ".."));
1630
- }
1631
- return "/";
1632
- };
1633
- var templatePath = (language, client, templateName) => path.join(rootDirFinder(), "src", "snippets", "templates", language, client, `${templateName}.jinja`);
1634
- var hasTemplate = (language, client, templateName) => pathExists(templatePath(language, client, templateName));
1730
+ var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
1635
1731
  var loadTemplate = (language, client, templateName) => {
1636
- const template = fs.readFileSync(templatePath(language, client, templateName), "utf8");
1732
+ const template = templates[language]?.[client]?.[templateName];
1733
+ if (!template) {
1734
+ throw new Error(`Template not found: ${language}/${client}/${templateName}`);
1735
+ }
1637
1736
  return (data) => new Template(template).render({ ...data });
1638
1737
  };
1639
1738
  var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
package/dist/src/snippets/getInferenceSnippets.d.ts.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AAEA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAC5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAkT9E,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
1
+ {"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AAEA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAC5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAgS9E,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
package/dist/src/snippets/templates.exported.d.ts ADDED
@@ -0,0 +1,2 @@
1
+ export declare const templates: Record<string, Record<string, Record<string, string>>>;
2
+ //# sourceMappingURL=templates.exported.d.ts.map
package/dist/src/snippets/templates.exported.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CAsEnE,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@huggingface/inference",
3
- "version": "3.6.0",
3
+ "version": "3.6.1",
4
4
  "packageManager": "pnpm@8.10.5",
5
5
  "license": "MIT",
6
6
  "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -26,23 +26,17 @@
26
26
  },
27
27
  "files": [
28
28
  "dist",
29
- "src"
29
+ "src",
30
+ "!src/snippets/templates/**/*.jinja"
30
31
  ],
31
32
  "source": "src/index.ts",
32
33
  "types": "./dist/src/index.d.ts",
33
34
  "main": "./dist/index.cjs",
34
35
  "module": "./dist/index.js",
35
36
  "exports": {
36
- ".": {
37
- "types": "./dist/src/index.d.ts",
38
- "require": "./dist/index.cjs",
39
- "import": "./dist/index.js"
40
- }
41
- },
42
- "browser": {
43
- "./src/snippets/index.js": false,
44
- "./dist/index.js": "./dist/browser/index.js",
45
- "./dist/index.mjs": "./dist/browser/index.mjs"
37
+ "types": "./dist/src/index.d.ts",
38
+ "require": "./dist/index.cjs",
39
+ "import": "./dist/index.js"
46
40
  },
47
41
  "type": "module",
48
42
  "dependencies": {
@@ -54,7 +48,7 @@
54
48
  },
55
49
  "resolutions": {},
56
50
  "scripts": {
57
- "build": "tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration",
51
+ "build": "pnpm run export-templates && tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration",
58
52
  "dts": "tsx scripts/generate-dts.ts && tsc --noEmit dist/index.d.ts",
59
53
  "lint": "eslint --quiet --fix --ext .cjs,.ts .",
60
54
  "lint:check": "eslint --ext .cjs,.ts .",
@@ -63,6 +57,7 @@
63
57
  "test": "vitest run --config vitest.config.mts",
64
58
  "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts",
65
59
  "check": "tsc",
66
- "dev": "tsup src/index.ts --format cjs,esm --watch"
60
+ "dev": "pnpm run export-templates && tsup src/index.ts --format cjs,esm --watch",
61
+ "export-templates": "tsx scripts/export-templates.ts"
67
62
  }
68
63
  }
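The build and dev scripts now run an export-templates step before bundling, and the raw .jinja files are excluded from the published package. The export script itself is not part of this diff; the following is a hypothetical sketch of what scripts/export-templates.ts might do, assuming the src/snippets/templates/<language>/<client>/<name>.jinja layout implied by the removed files, to produce the generated templates.exported.ts shown further down.

```ts
// Hypothetical sketch only - the real scripts/export-templates.ts is not included in this diff.
import { readdirSync, readFileSync, writeFileSync } from "node:fs";
import { join } from "node:path";

const root = join("src", "snippets", "templates");
const out: Record<string, Record<string, Record<string, string>>> = {};

// Walk templates/<language>/<client>/<name>.jinja and inline each file's contents.
for (const language of readdirSync(root)) {
	for (const client of readdirSync(join(root, language))) {
		for (const file of readdirSync(join(root, language, client))) {
			if (!file.endsWith(".jinja")) continue;
			const name = file.replace(/\.jinja$/, "");
			const languageEntry = (out[language] ??= {});
			const clientEntry = (languageEntry[client] ??= {});
			clientEntry[name] = readFileSync(join(root, language, client, file), "utf8");
		}
	}
}

writeFileSync(
	join("src", "snippets", "templates.exported.ts"),
	"// Generated file - do not edit directly\n" +
		`export const templates: Record<string, Record<string, Record<string, string>>> = ${JSON.stringify(out, null, "\t")} as const;\n`
);
```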
package/src/snippets/getInferenceSnippets.ts CHANGED
@@ -10,9 +10,7 @@ import {
10
10
  import type { InferenceProvider, InferenceTask, RequestArgs } from "../types";
11
11
  import { Template } from "@huggingface/jinja";
12
12
  import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions";
13
- import fs from "fs";
14
- import path from "path";
15
- import { existsSync as pathExists } from "node:fs";
13
+ import { templates } from "./templates.exported";
16
14
 
17
15
  const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"] as const;
18
16
  const JS_CLIENTS = ["fetch", "huggingface.js", "openai"] as const;
@@ -44,34 +42,18 @@ interface TemplateParams {
44
42
 
45
43
  // Helpers to find + load templates
46
44
 
47
- const rootDirFinder = (): string => {
48
- let currentPath =
49
- typeof import.meta !== "undefined" && import.meta.url
50
- ? path.normalize(new URL(import.meta.url).pathname) /// for ESM
51
- : __dirname; /// for CJS
52
-
53
- while (currentPath !== "/") {
54
- if (pathExists(path.join(currentPath, "package.json"))) {
55
- return currentPath;
56
- }
57
-
58
- currentPath = path.normalize(path.join(currentPath, ".."));
59
- }
60
-
61
- return "/";
62
- };
63
-
64
- const templatePath = (language: InferenceSnippetLanguage, client: Client, templateName: string): string =>
65
- path.join(rootDirFinder(), "src", "snippets", "templates", language, client, `${templateName}.jinja`);
66
45
  const hasTemplate = (language: InferenceSnippetLanguage, client: Client, templateName: string): boolean =>
67
- pathExists(templatePath(language, client, templateName));
46
+ templates[language]?.[client]?.[templateName] !== undefined;
68
47
 
69
48
  const loadTemplate = (
70
49
  language: InferenceSnippetLanguage,
71
50
  client: Client,
72
51
  templateName: string
73
52
  ): ((data: TemplateParams) => string) => {
74
- const template = fs.readFileSync(templatePath(language, client, templateName), "utf8");
53
+ const template = templates[language]?.[client]?.[templateName];
54
+ if (!template) {
55
+ throw new Error(`Template not found: ${language}/${client}/${templateName}`);
56
+ }
75
57
  return (data: TemplateParams) => new Template(template).render({ ...data });
76
58
  };
77
59
 
package/src/snippets/templates.exported.ts ADDED
@@ -0,0 +1,72 @@
1
+ // Generated file - do not edit directly
2
+ export const templates: Record<string, Record<string, Record<string, string>>> = {
3
+ "js": {
4
+ "fetch": {
5
+ "basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
6
+ "basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
7
+ "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
+ "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});",
10
+ "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
11
+ },
12
+ "huggingface.js": {
13
+ "basic": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst output = await client.{{ methodName }}({\n\tmodel: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
14
+ "basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
15
+ "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
16
+ "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
17
+ "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = await client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}",
18
+ "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n});\n/// Use the generated image (it's a Blob)",
19
+ "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n});\n// Use the generated video (it's a Blob)"
20
+ },
21
+ "openai": {
22
+ "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
23
+ "conversationalStream": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nlet out = \"\";\n\nconst stream = await client.chat.completions.create({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}"
24
+ }
25
+ },
26
+ "python": {
27
+ "fal_client": {
28
+ "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
29
+ },
30
+ "huggingface_hub": {
31
+ "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
32
+ "basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
33
+ "basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
34
+ "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
35
+ "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
36
+ "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
37
+ "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
38
+ "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n)",
39
+ "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
40
+ "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
41
+ },
42
+ "openai": {
43
+ "conversational": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\ncompletion = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
44
+ "conversationalStream": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\nstream = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\")"
45
+ },
46
+ "requests": {
47
+ "basic": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n}) ",
48
+ "basicAudio": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
49
+ "basicImage": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
50
+ "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
51
+ "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
52
+ "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
53
+ "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
54
+ "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\"Authorization\": \"{{ authorizationHeader }}\"}",
55
+ "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
56
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
57
+ "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
58
+ "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
59
+ "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
60
+ }
61
+ },
62
+ "sh": {
63
+ "curl": {
64
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
65
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
66
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
67
+ "conversational": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": false\n }'",
68
+ "conversationalStream": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": true\n }'",
69
+ "zeroShotClassification": "curl {{ fullUrl }} \\\n -X POST \\\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: {{ authorizationHeader }}'"
70
+ }
71
+ }
72
+ } as const;
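The keys of the generated record mirror the old directory layout, so templates["sh"]["curl"]["basic"] replaces src/snippets/templates/sh/curl/basic.jinja. A small sketch of rendering one of the inlined templates directly with @huggingface/jinja (relative import, placeholder values are illustrative):

```ts
import { Template } from "@huggingface/jinja";
import { templates } from "./templates.exported";

// sh/curl/basic used to live at src/snippets/templates/sh/curl/basic.jinja.
const source = templates["sh"]?.["curl"]?.["basic"];
if (!source) {
	throw new Error("Template not found: sh/curl/basic");
}

// Placeholder values below are illustrative, not produced by the library.
const snippet = new Template(source).render({
	fullUrl: "https://example.com/models/some-model",
	authorizationHeader: "Bearer hf_xxx",
	providerInputs: { asCurlString: '    "inputs": "Hello, world!"' },
});
console.log(snippet);
```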