@huggingface/inference 3.6.0 → 3.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/dist/index.cjs +240 -71
  2. package/dist/index.js +240 -60
  3. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  4. package/dist/src/providers/black-forest-labs.d.ts.map +1 -1
  5. package/dist/src/providers/cerebras.d.ts.map +1 -1
  6. package/dist/src/providers/cohere.d.ts.map +1 -1
  7. package/dist/src/providers/fal-ai.d.ts +6 -16
  8. package/dist/src/providers/fal-ai.d.ts.map +1 -1
  9. package/dist/src/providers/fireworks-ai.d.ts.map +1 -1
  10. package/dist/src/providers/hf-inference.d.ts.map +1 -1
  11. package/dist/src/providers/hyperbolic.d.ts.map +1 -1
  12. package/dist/src/providers/nebius.d.ts.map +1 -1
  13. package/dist/src/providers/novita.d.ts.map +1 -1
  14. package/dist/src/providers/openai.d.ts.map +1 -1
  15. package/dist/src/providers/replicate.d.ts.map +1 -1
  16. package/dist/src/providers/sambanova.d.ts.map +1 -1
  17. package/dist/src/providers/together.d.ts.map +1 -1
  18. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
  19. package/dist/src/snippets/templates.exported.d.ts +2 -0
  20. package/dist/src/snippets/templates.exported.d.ts.map +1 -0
  21. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  22. package/dist/src/types.d.ts +4 -2
  23. package/dist/src/types.d.ts.map +1 -1
  24. package/dist/test/InferenceClient.spec.d.ts.map +1 -1
  25. package/package.json +10 -15
  26. package/src/lib/makeRequestOptions.ts +3 -1
  27. package/src/providers/black-forest-labs.ts +6 -2
  28. package/src/providers/cerebras.ts +6 -2
  29. package/src/providers/cohere.ts +6 -2
  30. package/src/providers/fal-ai.ts +85 -3
  31. package/src/providers/fireworks-ai.ts +6 -2
  32. package/src/providers/hf-inference.ts +6 -2
  33. package/src/providers/hyperbolic.ts +6 -2
  34. package/src/providers/nebius.ts +6 -2
  35. package/src/providers/novita.ts +5 -2
  36. package/src/providers/openai.ts +6 -2
  37. package/src/providers/replicate.ts +6 -2
  38. package/src/providers/sambanova.ts +6 -2
  39. package/src/providers/together.ts +6 -2
  40. package/src/snippets/getInferenceSnippets.ts +6 -24
  41. package/src/snippets/templates.exported.ts +72 -0
  42. package/src/tasks/cv/textToVideo.ts +5 -21
  43. package/src/types.ts +5 -2
  44. package/dist/browser/index.cjs +0 -1652
  45. package/dist/browser/index.js +0 -1652
  46. package/src/snippets/templates/js/fetch/basic.jinja +0 -19
  47. package/src/snippets/templates/js/fetch/basicAudio.jinja +0 -19
  48. package/src/snippets/templates/js/fetch/basicImage.jinja +0 -19
  49. package/src/snippets/templates/js/fetch/textToAudio.jinja +0 -41
  50. package/src/snippets/templates/js/fetch/textToImage.jinja +0 -19
  51. package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +0 -22
  52. package/src/snippets/templates/js/huggingface.js/basic.jinja +0 -11
  53. package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +0 -13
  54. package/src/snippets/templates/js/huggingface.js/basicImage.jinja +0 -13
  55. package/src/snippets/templates/js/huggingface.js/conversational.jinja +0 -11
  56. package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +0 -19
  57. package/src/snippets/templates/js/huggingface.js/textToImage.jinja +0 -11
  58. package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +0 -10
  59. package/src/snippets/templates/js/openai/conversational.jinja +0 -13
  60. package/src/snippets/templates/js/openai/conversationalStream.jinja +0 -22
  61. package/src/snippets/templates/python/fal_client/textToImage.jinja +0 -11
  62. package/src/snippets/templates/python/huggingface_hub/basic.jinja +0 -4
  63. package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +0 -1
  64. package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +0 -1
  65. package/src/snippets/templates/python/huggingface_hub/conversational.jinja +0 -6
  66. package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +0 -8
  67. package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +0 -5
  68. package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +0 -6
  69. package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +0 -6
  70. package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +0 -5
  71. package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +0 -4
  72. package/src/snippets/templates/python/openai/conversational.jinja +0 -13
  73. package/src/snippets/templates/python/openai/conversationalStream.jinja +0 -15
  74. package/src/snippets/templates/python/requests/basic.jinja +0 -7
  75. package/src/snippets/templates/python/requests/basicAudio.jinja +0 -7
  76. package/src/snippets/templates/python/requests/basicImage.jinja +0 -7
  77. package/src/snippets/templates/python/requests/conversational.jinja +0 -9
  78. package/src/snippets/templates/python/requests/conversationalStream.jinja +0 -16
  79. package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +0 -13
  80. package/src/snippets/templates/python/requests/imageToImage.jinja +0 -15
  81. package/src/snippets/templates/python/requests/importRequests.jinja +0 -10
  82. package/src/snippets/templates/python/requests/tabular.jinja +0 -9
  83. package/src/snippets/templates/python/requests/textToAudio.jinja +0 -23
  84. package/src/snippets/templates/python/requests/textToImage.jinja +0 -14
  85. package/src/snippets/templates/python/requests/zeroShotClassification.jinja +0 -8
  86. package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +0 -14
  87. package/src/snippets/templates/sh/curl/basic.jinja +0 -7
  88. package/src/snippets/templates/sh/curl/basicAudio.jinja +0 -5
  89. package/src/snippets/templates/sh/curl/basicImage.jinja +0 -5
  90. package/src/snippets/templates/sh/curl/conversational.jinja +0 -7
  91. package/src/snippets/templates/sh/curl/conversationalStream.jinja +0 -7
  92. package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +0 -5
@@ -0,0 +1,72 @@
1
+ // Generated file - do not edit directly
2
+ export const templates: Record<string, Record<string, Record<string, string>>> = {
3
+ "js": {
4
+ "fetch": {
5
+ "basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
6
+ "basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
7
+ "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
+ "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});",
10
+ "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
11
+ },
12
+ "huggingface.js": {
13
+ "basic": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst output = await client.{{ methodName }}({\n\tmodel: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
14
+ "basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
15
+ "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
16
+ "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
17
+ "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = await client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}",
18
+ "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n});\n/// Use the generated image (it's a Blob)",
19
+ "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n});\n// Use the generated video (it's a Blob)"
20
+ },
21
+ "openai": {
22
+ "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
23
+ "conversationalStream": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nlet out = \"\";\n\nconst stream = await client.chat.completions.create({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}"
24
+ }
25
+ },
26
+ "python": {
27
+ "fal_client": {
28
+ "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
29
+ },
30
+ "huggingface_hub": {
31
+ "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
32
+ "basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
33
+ "basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
34
+ "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
35
+ "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
36
+ "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
37
+ "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
38
+ "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n)",
39
+ "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
40
+ "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
41
+ },
42
+ "openai": {
43
+ "conversational": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\ncompletion = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
44
+ "conversationalStream": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\nstream = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\")"
45
+ },
46
+ "requests": {
47
+ "basic": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n}) ",
48
+ "basicAudio": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
49
+ "basicImage": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
50
+ "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
51
+ "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
52
+ "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
53
+ "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
54
+ "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\"Authorization\": \"{{ authorizationHeader }}\"}",
55
+ "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
56
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
57
+ "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
58
+ "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
59
+ "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
60
+ }
61
+ },
62
+ "sh": {
63
+ "curl": {
64
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
65
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
66
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
67
+ "conversational": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": false\n }'",
68
+ "conversationalStream": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": true\n }'",
69
+ "zeroShotClassification": "curl {{ fullUrl }} \\\n -X POST \\\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: {{ authorizationHeader }}'"
70
+ }
71
+ }
72
+ } as const;
@@ -5,17 +5,13 @@ import { omit } from "../../utils/omit";
5
5
  import { isUrl } from "../../lib/isUrl";
6
6
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
7
7
  import { typedInclude } from "../../utils/typedInclude";
8
+ import { makeRequestOptions } from "../../lib/makeRequestOptions";
9
+ import { pollFalResponse, type FalAiQueueOutput } from "../../providers/fal-ai";
8
10
 
9
11
  export type TextToVideoArgs = BaseArgs & TextToVideoInput;
10
12
 
11
13
  export type TextToVideoOutput = Blob;
12
14
 
13
- interface FalAiOutput {
14
- video: {
15
- url: string;
16
- };
17
- }
18
-
19
15
  interface ReplicateOutput {
20
16
  output: string;
21
17
  }
@@ -39,25 +35,13 @@ export async function textToVideo(args: TextToVideoArgs, options?: Options): Pro
39
35
  args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita"
40
36
  ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs }
41
37
  : args;
42
- const res = await request<FalAiOutput | ReplicateOutput | NovitaOutput>(payload, {
38
+ const res = await request<FalAiQueueOutput | ReplicateOutput | NovitaOutput>(payload, {
43
39
  ...options,
44
40
  task: "text-to-video",
45
41
  });
46
42
  if (args.provider === "fal-ai") {
47
- const isValidOutput =
48
- typeof res === "object" &&
49
- !!res &&
50
- "video" in res &&
51
- typeof res.video === "object" &&
52
- !!res.video &&
53
- "url" in res.video &&
54
- typeof res.video.url === "string" &&
55
- isUrl(res.video.url);
56
- if (!isValidOutput) {
57
- throw new InferenceOutputError("Expected { video: { url: string } }");
58
- }
59
- const urlResponse = await fetch((res as FalAiOutput).video.url);
60
- return await urlResponse.blob();
43
+ const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
44
+ return await pollFalResponse(res as FalAiQueueOutput, url, info.headers as Record<string, string>);
61
45
  } else if (args.provider === "novita") {
62
46
  const isValidOutput =
63
47
  typeof res === "object" &&
package/src/types.ts CHANGED
@@ -94,19 +94,22 @@ export type RequestArgs = BaseArgs &
94
94
  };
95
95
 
96
96
  export interface ProviderConfig {
97
- baseUrl: string;
97
+ makeBaseUrl: ((task?: InferenceTask) => string) | (() => string);
98
98
  makeBody: (params: BodyParams) => Record<string, unknown>;
99
99
  makeHeaders: (params: HeaderParams) => Record<string, string>;
100
100
  makeUrl: (params: UrlParams) => string;
101
101
  clientSideRoutingOnly?: boolean;
102
102
  }
103
103
 
104
+ export type AuthMethod = "none" | "hf-token" | "credentials-include" | "provider-key";
105
+
104
106
  export interface HeaderParams {
105
107
  accessToken?: string;
106
- authMethod: "none" | "hf-token" | "credentials-include" | "provider-key";
108
+ authMethod: AuthMethod;
107
109
  }
108
110
 
109
111
  export interface UrlParams {
112
+ authMethod: AuthMethod;
110
113
  baseUrl: string;
111
114
  model: string;
112
115
  task?: InferenceTask;