@huggingface/inference 3.13.2 → 3.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (600)
  1. package/dist/{src → commonjs}/InferenceClient.d.ts +2 -2
  2. package/dist/commonjs/InferenceClient.d.ts.map +1 -0
  3. package/dist/commonjs/InferenceClient.js +83 -0
  4. package/dist/commonjs/config.js +6 -0
  5. package/dist/commonjs/index.d.ts +7 -0
  6. package/dist/commonjs/index.d.ts.map +1 -0
  7. package/dist/commonjs/index.js +49 -0
  8. package/dist/commonjs/lib/InferenceOutputError.js +10 -0
  9. package/dist/commonjs/lib/getDefaultTask.js +48 -0
  10. package/dist/{src → commonjs}/lib/getInferenceProviderMapping.d.ts +1 -1
  11. package/dist/{src → commonjs}/lib/getInferenceProviderMapping.d.ts.map +1 -1
  12. package/dist/commonjs/lib/getInferenceProviderMapping.js +81 -0
  13. package/dist/{src → commonjs}/lib/getProviderHelper.d.ts +2 -2
  14. package/dist/{src → commonjs}/lib/getProviderHelper.d.ts.map +1 -1
  15. package/dist/commonjs/lib/getProviderHelper.js +168 -0
  16. package/dist/commonjs/lib/isUrl.js +6 -0
  17. package/dist/{src → commonjs}/lib/makeRequestOptions.d.ts +3 -3
  18. package/dist/{src → commonjs}/lib/makeRequestOptions.d.ts.map +1 -1
  19. package/dist/commonjs/lib/makeRequestOptions.js +161 -0
  20. package/dist/commonjs/package.d.ts +3 -0
  21. package/dist/commonjs/package.d.ts.map +1 -0
  22. package/dist/commonjs/package.js +6 -0
  23. package/dist/commonjs/package.json +3 -0
  24. package/dist/{src → commonjs}/providers/black-forest-labs.d.ts +2 -2
  25. package/dist/{src → commonjs}/providers/black-forest-labs.d.ts.map +1 -1
  26. package/dist/commonjs/providers/black-forest-labs.js +82 -0
  27. package/dist/{src → commonjs}/providers/cerebras.d.ts +1 -1
  28. package/dist/{src → commonjs}/providers/cerebras.d.ts.map +1 -1
  29. package/dist/commonjs/providers/cerebras.js +26 -0
  30. package/dist/{src → commonjs}/providers/cohere.d.ts +1 -1
  31. package/dist/{src → commonjs}/providers/cohere.d.ts.map +1 -1
  32. package/dist/commonjs/providers/cohere.js +29 -0
  33. package/dist/{src → commonjs}/providers/consts.d.ts +3 -3
  34. package/dist/commonjs/providers/consts.d.ts.map +1 -0
  35. package/dist/commonjs/providers/consts.js +35 -0
  36. package/dist/{src → commonjs}/providers/fal-ai.d.ts +3 -3
  37. package/dist/{src → commonjs}/providers/fal-ai.d.ts.map +1 -1
  38. package/dist/commonjs/providers/fal-ai.js +216 -0
  39. package/dist/{src → commonjs}/providers/featherless-ai.d.ts +2 -2
  40. package/dist/{src → commonjs}/providers/featherless-ai.d.ts.map +1 -1
  41. package/dist/commonjs/providers/featherless-ai.js +38 -0
  42. package/dist/{src → commonjs}/providers/fireworks-ai.d.ts +1 -1
  43. package/dist/commonjs/providers/fireworks-ai.d.ts.map +1 -0
  44. package/dist/commonjs/providers/fireworks-ai.js +29 -0
  45. package/dist/{src → commonjs}/providers/groq.d.ts +1 -1
  46. package/dist/{src → commonjs}/providers/groq.d.ts.map +1 -1
  47. package/dist/commonjs/providers/groq.js +39 -0
  48. package/dist/{src → commonjs}/providers/hf-inference.d.ts +6 -6
  49. package/dist/{src → commonjs}/providers/hf-inference.d.ts.map +1 -1
  50. package/dist/commonjs/providers/hf-inference.js +432 -0
  51. package/dist/{src → commonjs}/providers/hyperbolic.d.ts +2 -2
  52. package/dist/{src → commonjs}/providers/hyperbolic.d.ts.map +1 -1
  53. package/dist/commonjs/providers/hyperbolic.js +78 -0
  54. package/dist/{src → commonjs}/providers/nebius.d.ts +2 -2
  55. package/dist/{src → commonjs}/providers/nebius.d.ts.map +1 -1
  56. package/dist/commonjs/providers/nebius.js +70 -0
  57. package/dist/{src → commonjs}/providers/novita.d.ts +2 -2
  58. package/dist/{src → commonjs}/providers/novita.d.ts.map +1 -1
  59. package/dist/commonjs/providers/novita.js +73 -0
  60. package/dist/{src → commonjs}/providers/nscale.d.ts +2 -2
  61. package/dist/{src → commonjs}/providers/nscale.d.ts.map +1 -1
  62. package/dist/commonjs/providers/nscale.js +46 -0
  63. package/dist/{src → commonjs}/providers/openai.d.ts +1 -1
  64. package/dist/{src → commonjs}/providers/openai.d.ts.map +1 -1
  65. package/dist/commonjs/providers/openai.js +15 -0
  66. package/dist/{src → commonjs}/providers/ovhcloud.d.ts +2 -2
  67. package/dist/{src → commonjs}/providers/ovhcloud.d.ts.map +1 -1
  68. package/dist/commonjs/providers/ovhcloud.js +60 -0
  69. package/dist/{src → commonjs}/providers/providerHelper.d.ts +4 -4
  70. package/dist/{src → commonjs}/providers/providerHelper.d.ts.map +1 -1
  71. package/dist/commonjs/providers/providerHelper.js +108 -0
  72. package/dist/{src → commonjs}/providers/replicate.d.ts +2 -2
  73. package/dist/{src → commonjs}/providers/replicate.d.ts.map +1 -1
  74. package/dist/commonjs/providers/replicate.js +135 -0
  75. package/dist/{src → commonjs}/providers/sambanova.d.ts +3 -3
  76. package/dist/{src → commonjs}/providers/sambanova.d.ts.map +1 -1
  77. package/dist/commonjs/providers/sambanova.js +49 -0
  78. package/dist/{src → commonjs}/providers/together.d.ts +2 -2
  79. package/dist/{src → commonjs}/providers/together.d.ts.map +1 -1
  80. package/dist/commonjs/providers/together.js +71 -0
  81. package/dist/{src → commonjs}/snippets/getInferenceSnippets.d.ts +2 -2
  82. package/dist/{src → commonjs}/snippets/getInferenceSnippets.d.ts.map +1 -1
  83. package/dist/commonjs/snippets/getInferenceSnippets.js +312 -0
  84. package/dist/commonjs/snippets/index.js +5 -0
  85. package/dist/commonjs/snippets/templates.exported.js +81 -0
  86. package/dist/{src → commonjs}/tasks/audio/audioClassification.d.ts +2 -2
  87. package/dist/{src → commonjs}/tasks/audio/audioClassification.d.ts.map +1 -1
  88. package/dist/commonjs/tasks/audio/audioClassification.js +21 -0
  89. package/dist/{src → commonjs}/tasks/audio/audioToAudio.d.ts +2 -2
  90. package/dist/commonjs/tasks/audio/audioToAudio.d.ts.map +1 -0
  91. package/dist/commonjs/tasks/audio/audioToAudio.js +22 -0
  92. package/dist/{src → commonjs}/tasks/audio/automaticSpeechRecognition.d.ts +2 -2
  93. package/dist/commonjs/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
  94. package/dist/commonjs/tasks/audio/automaticSpeechRecognition.js +25 -0
  95. package/dist/{src → commonjs}/tasks/audio/textToSpeech.d.ts +1 -1
  96. package/dist/commonjs/tasks/audio/textToSpeech.d.ts.map +1 -0
  97. package/dist/commonjs/tasks/audio/textToSpeech.js +19 -0
  98. package/dist/{src → commonjs}/tasks/audio/utils.d.ts +1 -1
  99. package/dist/commonjs/tasks/audio/utils.d.ts.map +1 -0
  100. package/dist/commonjs/tasks/audio/utils.js +12 -0
  101. package/dist/{src → commonjs}/tasks/custom/request.d.ts +1 -1
  102. package/dist/commonjs/tasks/custom/request.d.ts.map +1 -0
  103. package/dist/commonjs/tasks/custom/request.js +17 -0
  104. package/dist/{src → commonjs}/tasks/custom/streamingRequest.d.ts +1 -1
  105. package/dist/commonjs/tasks/custom/streamingRequest.d.ts.map +1 -0
  106. package/dist/commonjs/tasks/custom/streamingRequest.js +16 -0
  107. package/dist/{src → commonjs}/tasks/cv/imageClassification.d.ts +2 -2
  108. package/dist/commonjs/tasks/cv/imageClassification.d.ts.map +1 -0
  109. package/dist/commonjs/tasks/cv/imageClassification.js +21 -0
  110. package/dist/{src → commonjs}/tasks/cv/imageSegmentation.d.ts +2 -2
  111. package/dist/commonjs/tasks/cv/imageSegmentation.d.ts.map +1 -0
  112. package/dist/commonjs/tasks/cv/imageSegmentation.js +21 -0
  113. package/dist/{src → commonjs}/tasks/cv/imageToImage.d.ts +1 -1
  114. package/dist/commonjs/tasks/cv/imageToImage.d.ts.map +1 -0
  115. package/dist/commonjs/tasks/cv/imageToImage.js +20 -0
  116. package/dist/{src → commonjs}/tasks/cv/imageToText.d.ts +2 -2
  117. package/dist/commonjs/tasks/cv/imageToText.d.ts.map +1 -0
  118. package/dist/commonjs/tasks/cv/imageToText.js +20 -0
  119. package/dist/{src → commonjs}/tasks/cv/objectDetection.d.ts +2 -2
  120. package/dist/commonjs/tasks/cv/objectDetection.d.ts.map +1 -0
  121. package/dist/commonjs/tasks/cv/objectDetection.js +21 -0
  122. package/dist/{src → commonjs}/tasks/cv/textToImage.d.ts +1 -1
  123. package/dist/commonjs/tasks/cv/textToImage.d.ts.map +1 -0
  124. package/dist/commonjs/tasks/cv/textToImage.js +17 -0
  125. package/dist/{src → commonjs}/tasks/cv/textToVideo.d.ts +1 -1
  126. package/dist/commonjs/tasks/cv/textToVideo.d.ts.map +1 -0
  127. package/dist/commonjs/tasks/cv/textToVideo.js +17 -0
  128. package/dist/{src → commonjs}/tasks/cv/utils.d.ts +1 -1
  129. package/dist/commonjs/tasks/cv/utils.d.ts.map +1 -0
  130. package/dist/commonjs/tasks/cv/utils.js +7 -0
  131. package/dist/{src → commonjs}/tasks/cv/zeroShotImageClassification.d.ts +1 -1
  132. package/dist/commonjs/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
  133. package/dist/commonjs/tasks/cv/zeroShotImageClassification.js +39 -0
  134. package/dist/commonjs/tasks/index.d.ts +33 -0
  135. package/dist/commonjs/tasks/index.d.ts.map +1 -0
  136. package/dist/commonjs/tasks/index.js +54 -0
  137. package/dist/{src → commonjs}/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
  138. package/dist/commonjs/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
  139. package/dist/commonjs/tasks/multimodal/documentQuestionAnswering.js +27 -0
  140. package/dist/{src → commonjs}/tasks/multimodal/visualQuestionAnswering.d.ts +1 -1
  141. package/dist/commonjs/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
  142. package/dist/commonjs/tasks/multimodal/visualQuestionAnswering.js +27 -0
  143. package/dist/{src → commonjs}/tasks/nlp/chatCompletion.d.ts +1 -1
  144. package/dist/{src → commonjs}/tasks/nlp/chatCompletion.d.ts.map +1 -1
  145. package/dist/commonjs/tasks/nlp/chatCompletion.js +18 -0
  146. package/dist/{src → commonjs}/tasks/nlp/chatCompletionStream.d.ts +1 -1
  147. package/dist/{src → commonjs}/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
  148. package/dist/commonjs/tasks/nlp/chatCompletionStream.js +17 -0
  149. package/dist/{src → commonjs}/tasks/nlp/featureExtraction.d.ts +1 -1
  150. package/dist/commonjs/tasks/nlp/featureExtraction.d.ts.map +1 -0
  151. package/dist/commonjs/tasks/nlp/featureExtraction.js +18 -0
  152. package/dist/{src → commonjs}/tasks/nlp/fillMask.d.ts +1 -1
  153. package/dist/commonjs/tasks/nlp/fillMask.d.ts.map +1 -0
  154. package/dist/commonjs/tasks/nlp/fillMask.js +18 -0
  155. package/dist/{src → commonjs}/tasks/nlp/questionAnswering.d.ts +1 -1
  156. package/dist/commonjs/tasks/nlp/questionAnswering.d.ts.map +1 -0
  157. package/dist/commonjs/tasks/nlp/questionAnswering.js +18 -0
  158. package/dist/{src → commonjs}/tasks/nlp/sentenceSimilarity.d.ts +1 -1
  159. package/dist/commonjs/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
  160. package/dist/commonjs/tasks/nlp/sentenceSimilarity.js +18 -0
  161. package/dist/{src → commonjs}/tasks/nlp/summarization.d.ts +1 -1
  162. package/dist/commonjs/tasks/nlp/summarization.d.ts.map +1 -0
  163. package/dist/commonjs/tasks/nlp/summarization.js +18 -0
  164. package/dist/{src → commonjs}/tasks/nlp/tableQuestionAnswering.d.ts +1 -1
  165. package/dist/{src → commonjs}/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -1
  166. package/dist/commonjs/tasks/nlp/tableQuestionAnswering.js +18 -0
  167. package/dist/{src → commonjs}/tasks/nlp/textClassification.d.ts +1 -1
  168. package/dist/commonjs/tasks/nlp/textClassification.d.ts.map +1 -0
  169. package/dist/commonjs/tasks/nlp/textClassification.js +18 -0
  170. package/dist/{src → commonjs}/tasks/nlp/textGeneration.d.ts +1 -1
  171. package/dist/commonjs/tasks/nlp/textGeneration.d.ts.map +1 -0
  172. package/dist/commonjs/tasks/nlp/textGeneration.js +18 -0
  173. package/dist/{src → commonjs}/tasks/nlp/textGenerationStream.d.ts +1 -1
  174. package/dist/commonjs/tasks/nlp/textGenerationStream.d.ts.map +1 -0
  175. package/dist/commonjs/tasks/nlp/textGenerationStream.js +17 -0
  176. package/dist/{src → commonjs}/tasks/nlp/tokenClassification.d.ts +1 -1
  177. package/dist/{src → commonjs}/tasks/nlp/tokenClassification.d.ts.map +1 -1
  178. package/dist/commonjs/tasks/nlp/tokenClassification.js +18 -0
  179. package/dist/{src → commonjs}/tasks/nlp/translation.d.ts +1 -1
  180. package/dist/commonjs/tasks/nlp/translation.d.ts.map +1 -0
  181. package/dist/commonjs/tasks/nlp/translation.js +18 -0
  182. package/dist/{src → commonjs}/tasks/nlp/zeroShotClassification.d.ts +1 -1
  183. package/dist/{src → commonjs}/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
  184. package/dist/commonjs/tasks/nlp/zeroShotClassification.js +18 -0
  185. package/dist/{src → commonjs}/tasks/tabular/tabularClassification.d.ts +1 -1
  186. package/dist/commonjs/tasks/tabular/tabularClassification.d.ts.map +1 -0
  187. package/dist/commonjs/tasks/tabular/tabularClassification.js +20 -0
  188. package/dist/{src → commonjs}/tasks/tabular/tabularRegression.d.ts +1 -1
  189. package/dist/commonjs/tasks/tabular/tabularRegression.d.ts.map +1 -0
  190. package/dist/commonjs/tasks/tabular/tabularRegression.js +20 -0
  191. package/dist/{src → commonjs}/types.d.ts +1 -1
  192. package/dist/{src → commonjs}/types.d.ts.map +1 -1
  193. package/dist/commonjs/types.js +23 -0
  194. package/dist/commonjs/utils/base64FromBytes.js +15 -0
  195. package/dist/commonjs/utils/delay.js +8 -0
  196. package/dist/commonjs/utils/distributive-omit.js +7 -0
  197. package/dist/commonjs/utils/isBackend.js +6 -0
  198. package/dist/commonjs/utils/isFrontend.js +5 -0
  199. package/dist/commonjs/utils/omit.js +13 -0
  200. package/dist/commonjs/utils/pick.js +13 -0
  201. package/dist/{src → commonjs}/utils/request.d.ts +2 -2
  202. package/dist/{src → commonjs}/utils/request.d.ts.map +1 -1
  203. package/dist/commonjs/utils/request.js +116 -0
  204. package/dist/commonjs/utils/toArray.js +9 -0
  205. package/dist/commonjs/utils/typedEntries.js +6 -0
  206. package/dist/commonjs/utils/typedInclude.js +6 -0
  207. package/dist/commonjs/vendor/fetch-event-source/parse.d.ts.map +1 -0
  208. package/dist/commonjs/vendor/fetch-event-source/parse.js +185 -0
  209. package/dist/commonjs/vendor/fetch-event-source/parse.spec.js +370 -0
  210. package/dist/esm/InferenceClient.d.ts +32 -0
  211. package/dist/esm/InferenceClient.d.ts.map +1 -0
  212. package/dist/esm/InferenceClient.js +44 -0
  213. package/dist/esm/config.d.ts +4 -0
  214. package/dist/esm/config.d.ts.map +1 -0
  215. package/dist/esm/config.js +3 -0
  216. package/dist/esm/index.d.ts +7 -0
  217. package/dist/esm/index.d.ts.map +1 -0
  218. package/dist/{src/index.d.ts → esm/index.js} +4 -5
  219. package/dist/esm/lib/InferenceOutputError.d.ts +4 -0
  220. package/dist/esm/lib/InferenceOutputError.d.ts.map +1 -0
  221. package/dist/esm/lib/InferenceOutputError.js +6 -0
  222. package/dist/esm/lib/getDefaultTask.d.ts +11 -0
  223. package/dist/esm/lib/getDefaultTask.d.ts.map +1 -0
  224. package/dist/esm/lib/getDefaultTask.js +45 -0
  225. package/dist/esm/lib/getInferenceProviderMapping.d.ts +25 -0
  226. package/dist/esm/lib/getInferenceProviderMapping.d.ts.map +1 -0
  227. package/dist/esm/lib/getInferenceProviderMapping.js +75 -0
  228. package/dist/esm/lib/getProviderHelper.d.ts +37 -0
  229. package/dist/esm/lib/getProviderHelper.d.ts.map +1 -0
  230. package/dist/esm/lib/getProviderHelper.js +131 -0
  231. package/dist/esm/lib/isUrl.d.ts +2 -0
  232. package/dist/esm/lib/isUrl.d.ts.map +1 -0
  233. package/dist/esm/lib/isUrl.js +3 -0
  234. package/dist/esm/lib/makeRequestOptions.d.ts +31 -0
  235. package/dist/esm/lib/makeRequestOptions.d.ts.map +1 -0
  236. package/dist/esm/lib/makeRequestOptions.js +157 -0
  237. package/dist/esm/package.d.ts +3 -0
  238. package/dist/esm/package.d.ts.map +1 -0
  239. package/dist/esm/package.js +3 -0
  240. package/dist/esm/package.json +3 -0
  241. package/dist/esm/providers/black-forest-labs.d.ts +15 -0
  242. package/dist/esm/providers/black-forest-labs.d.ts.map +1 -0
  243. package/dist/esm/providers/black-forest-labs.js +78 -0
  244. package/dist/esm/providers/cerebras.d.ts +21 -0
  245. package/dist/esm/providers/cerebras.d.ts.map +1 -0
  246. package/dist/esm/providers/cerebras.js +22 -0
  247. package/dist/esm/providers/cohere.d.ts +22 -0
  248. package/dist/esm/providers/cohere.d.ts.map +1 -0
  249. package/dist/esm/providers/cohere.js +25 -0
  250. package/dist/esm/providers/consts.d.ts +12 -0
  251. package/dist/esm/providers/consts.d.ts.map +1 -0
  252. package/dist/esm/providers/consts.js +32 -0
  253. package/dist/esm/providers/fal-ai.d.ts +42 -0
  254. package/dist/esm/providers/fal-ai.d.ts.map +1 -0
  255. package/dist/esm/providers/fal-ai.js +209 -0
  256. package/dist/esm/providers/featherless-ai.d.ts +22 -0
  257. package/dist/esm/providers/featherless-ai.d.ts.map +1 -0
  258. package/dist/esm/providers/featherless-ai.js +33 -0
  259. package/dist/esm/providers/fireworks-ai.d.ts +22 -0
  260. package/dist/esm/providers/fireworks-ai.d.ts.map +1 -0
  261. package/dist/esm/providers/fireworks-ai.js +25 -0
  262. package/dist/esm/providers/groq.d.ts +10 -0
  263. package/dist/esm/providers/groq.d.ts.map +1 -0
  264. package/dist/esm/providers/groq.js +34 -0
  265. package/dist/esm/providers/hf-inference.d.ts +131 -0
  266. package/dist/esm/providers/hf-inference.d.ts.map +1 -0
  267. package/dist/esm/providers/hf-inference.js +400 -0
  268. package/dist/esm/providers/hyperbolic.d.ts +48 -0
  269. package/dist/esm/providers/hyperbolic.d.ts.map +1 -0
  270. package/dist/esm/providers/hyperbolic.js +72 -0
  271. package/dist/esm/providers/nebius.d.ts +49 -0
  272. package/dist/esm/providers/nebius.d.ts.map +1 -0
  273. package/dist/esm/providers/nebius.js +63 -0
  274. package/dist/esm/providers/novita.d.ts +22 -0
  275. package/dist/esm/providers/novita.d.ts.map +1 -0
  276. package/dist/esm/providers/novita.js +67 -0
  277. package/dist/esm/providers/nscale.d.ts +35 -0
  278. package/dist/esm/providers/nscale.d.ts.map +1 -0
  279. package/dist/esm/providers/nscale.js +41 -0
  280. package/dist/esm/providers/openai.d.ts +8 -0
  281. package/dist/esm/providers/openai.d.ts.map +1 -0
  282. package/dist/esm/providers/openai.js +11 -0
  283. package/dist/esm/providers/ovhcloud.d.ts +38 -0
  284. package/dist/esm/providers/ovhcloud.d.ts.map +1 -0
  285. package/dist/esm/providers/ovhcloud.js +55 -0
  286. package/dist/esm/providers/providerHelper.d.ts +186 -0
  287. package/dist/esm/providers/providerHelper.d.ts.map +1 -0
  288. package/dist/esm/providers/providerHelper.js +102 -0
  289. package/dist/esm/providers/replicate.d.ts +25 -0
  290. package/dist/esm/providers/replicate.d.ts.map +1 -0
  291. package/dist/esm/providers/replicate.js +129 -0
  292. package/dist/esm/providers/sambanova.d.ts +14 -0
  293. package/dist/esm/providers/sambanova.d.ts.map +1 -0
  294. package/dist/esm/providers/sambanova.js +44 -0
  295. package/dist/esm/providers/together.d.ts +49 -0
  296. package/dist/esm/providers/together.d.ts.map +1 -0
  297. package/dist/esm/providers/together.js +65 -0
  298. package/dist/esm/snippets/getInferenceSnippets.d.ts +9 -0
  299. package/dist/esm/snippets/getInferenceSnippets.d.ts.map +1 -0
  300. package/dist/esm/snippets/getInferenceSnippets.js +309 -0
  301. package/dist/esm/snippets/index.d.ts +2 -0
  302. package/dist/esm/snippets/index.d.ts.map +1 -0
  303. package/dist/esm/snippets/index.js +1 -0
  304. package/dist/esm/snippets/templates.exported.d.ts +2 -0
  305. package/dist/esm/snippets/templates.exported.d.ts.map +1 -0
  306. package/dist/esm/snippets/templates.exported.js +78 -0
  307. package/dist/esm/tasks/audio/audioClassification.d.ts +10 -0
  308. package/dist/esm/tasks/audio/audioClassification.d.ts.map +1 -0
  309. package/dist/esm/tasks/audio/audioClassification.js +18 -0
  310. package/dist/esm/tasks/audio/audioToAudio.d.ts +29 -0
  311. package/dist/esm/tasks/audio/audioToAudio.d.ts.map +1 -0
  312. package/dist/esm/tasks/audio/audioToAudio.js +19 -0
  313. package/dist/esm/tasks/audio/automaticSpeechRecognition.d.ts +10 -0
  314. package/dist/esm/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
  315. package/dist/esm/tasks/audio/automaticSpeechRecognition.js +22 -0
  316. package/dist/esm/tasks/audio/textToSpeech.d.ts +10 -0
  317. package/dist/esm/tasks/audio/textToSpeech.d.ts.map +1 -0
  318. package/dist/esm/tasks/audio/textToSpeech.js +16 -0
  319. package/dist/esm/tasks/audio/utils.d.ts +12 -0
  320. package/dist/esm/tasks/audio/utils.d.ts.map +1 -0
  321. package/dist/esm/tasks/audio/utils.js +9 -0
  322. package/dist/esm/tasks/custom/request.d.ts +10 -0
  323. package/dist/esm/tasks/custom/request.d.ts.map +1 -0
  324. package/dist/esm/tasks/custom/request.js +14 -0
  325. package/dist/esm/tasks/custom/streamingRequest.d.ts +10 -0
  326. package/dist/esm/tasks/custom/streamingRequest.d.ts.map +1 -0
  327. package/dist/esm/tasks/custom/streamingRequest.js +13 -0
  328. package/dist/esm/tasks/cv/imageClassification.d.ts +10 -0
  329. package/dist/esm/tasks/cv/imageClassification.d.ts.map +1 -0
  330. package/dist/esm/tasks/cv/imageClassification.js +18 -0
  331. package/dist/esm/tasks/cv/imageSegmentation.d.ts +10 -0
  332. package/dist/esm/tasks/cv/imageSegmentation.d.ts.map +1 -0
  333. package/dist/esm/tasks/cv/imageSegmentation.js +18 -0
  334. package/dist/esm/tasks/cv/imageToImage.d.ts +9 -0
  335. package/dist/esm/tasks/cv/imageToImage.d.ts.map +1 -0
  336. package/dist/esm/tasks/cv/imageToImage.js +17 -0
  337. package/dist/esm/tasks/cv/imageToText.d.ts +9 -0
  338. package/dist/esm/tasks/cv/imageToText.d.ts.map +1 -0
  339. package/dist/esm/tasks/cv/imageToText.js +17 -0
  340. package/dist/esm/tasks/cv/objectDetection.d.ts +10 -0
  341. package/dist/esm/tasks/cv/objectDetection.d.ts.map +1 -0
  342. package/dist/esm/tasks/cv/objectDetection.js +18 -0
  343. package/dist/esm/tasks/cv/textToImage.d.ts +18 -0
  344. package/dist/esm/tasks/cv/textToImage.d.ts.map +1 -0
  345. package/dist/esm/tasks/cv/textToImage.js +14 -0
  346. package/dist/esm/tasks/cv/textToVideo.d.ts +6 -0
  347. package/dist/esm/tasks/cv/textToVideo.d.ts.map +1 -0
  348. package/dist/esm/tasks/cv/textToVideo.js +14 -0
  349. package/dist/esm/tasks/cv/utils.d.ts +11 -0
  350. package/dist/esm/tasks/cv/utils.d.ts.map +1 -0
  351. package/dist/esm/tasks/cv/utils.js +4 -0
  352. package/dist/esm/tasks/cv/zeroShotImageClassification.d.ts +18 -0
  353. package/dist/esm/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
  354. package/dist/esm/tasks/cv/zeroShotImageClassification.js +36 -0
  355. package/dist/esm/tasks/index.d.ts +33 -0
  356. package/dist/esm/tasks/index.d.ts.map +1 -0
  357. package/dist/esm/tasks/index.js +38 -0
  358. package/dist/esm/tasks/multimodal/documentQuestionAnswering.d.ts +12 -0
  359. package/dist/esm/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
  360. package/dist/esm/tasks/multimodal/documentQuestionAnswering.js +24 -0
  361. package/dist/esm/tasks/multimodal/visualQuestionAnswering.d.ts +12 -0
  362. package/dist/esm/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
  363. package/dist/esm/tasks/multimodal/visualQuestionAnswering.js +24 -0
  364. package/dist/esm/tasks/nlp/chatCompletion.d.ts +7 -0
  365. package/dist/esm/tasks/nlp/chatCompletion.d.ts.map +1 -0
  366. package/dist/esm/tasks/nlp/chatCompletion.js +15 -0
  367. package/dist/esm/tasks/nlp/chatCompletionStream.d.ts +7 -0
  368. package/dist/esm/tasks/nlp/chatCompletionStream.d.ts.map +1 -0
  369. package/dist/esm/tasks/nlp/chatCompletionStream.js +14 -0
  370. package/dist/esm/tasks/nlp/featureExtraction.d.ts +17 -0
  371. package/dist/esm/tasks/nlp/featureExtraction.d.ts.map +1 -0
  372. package/dist/esm/tasks/nlp/featureExtraction.js +15 -0
  373. package/dist/esm/tasks/nlp/fillMask.d.ts +8 -0
  374. package/dist/esm/tasks/nlp/fillMask.d.ts.map +1 -0
  375. package/dist/esm/tasks/nlp/fillMask.js +15 -0
  376. package/dist/esm/tasks/nlp/questionAnswering.d.ts +8 -0
  377. package/dist/esm/tasks/nlp/questionAnswering.d.ts.map +1 -0
  378. package/dist/esm/tasks/nlp/questionAnswering.js +15 -0
  379. package/dist/esm/tasks/nlp/sentenceSimilarity.d.ts +8 -0
  380. package/dist/esm/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
  381. package/dist/esm/tasks/nlp/sentenceSimilarity.js +15 -0
  382. package/dist/esm/tasks/nlp/summarization.d.ts +8 -0
  383. package/dist/esm/tasks/nlp/summarization.d.ts.map +1 -0
  384. package/dist/esm/tasks/nlp/summarization.js +15 -0
  385. package/dist/esm/tasks/nlp/tableQuestionAnswering.d.ts +8 -0
  386. package/dist/esm/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -0
  387. package/dist/esm/tasks/nlp/tableQuestionAnswering.js +15 -0
  388. package/dist/esm/tasks/nlp/textClassification.d.ts +8 -0
  389. package/dist/esm/tasks/nlp/textClassification.d.ts.map +1 -0
  390. package/dist/esm/tasks/nlp/textClassification.js +15 -0
  391. package/dist/esm/tasks/nlp/textGeneration.d.ts +8 -0
  392. package/dist/esm/tasks/nlp/textGeneration.d.ts.map +1 -0
  393. package/dist/esm/tasks/nlp/textGeneration.js +15 -0
  394. package/dist/esm/tasks/nlp/textGenerationStream.d.ts +81 -0
  395. package/dist/esm/tasks/nlp/textGenerationStream.d.ts.map +1 -0
  396. package/dist/esm/tasks/nlp/textGenerationStream.js +14 -0
  397. package/dist/esm/tasks/nlp/tokenClassification.d.ts +8 -0
  398. package/dist/esm/tasks/nlp/tokenClassification.d.ts.map +1 -0
  399. package/dist/esm/tasks/nlp/tokenClassification.js +15 -0
  400. package/dist/esm/tasks/nlp/translation.d.ts +8 -0
  401. package/dist/esm/tasks/nlp/translation.d.ts.map +1 -0
  402. package/dist/esm/tasks/nlp/translation.js +15 -0
  403. package/dist/esm/tasks/nlp/zeroShotClassification.d.ts +8 -0
  404. package/dist/esm/tasks/nlp/zeroShotClassification.d.ts.map +1 -0
  405. package/dist/esm/tasks/nlp/zeroShotClassification.js +15 -0
  406. package/dist/esm/tasks/tabular/tabularClassification.d.ts +20 -0
  407. package/dist/esm/tasks/tabular/tabularClassification.d.ts.map +1 -0
  408. package/dist/esm/tasks/tabular/tabularClassification.js +17 -0
  409. package/dist/esm/tasks/tabular/tabularRegression.d.ts +20 -0
  410. package/dist/esm/tasks/tabular/tabularRegression.d.ts.map +1 -0
  411. package/dist/esm/tasks/tabular/tabularRegression.js +17 -0
  412. package/dist/esm/types.d.ts +97 -0
  413. package/dist/esm/types.d.ts.map +1 -0
  414. package/dist/esm/types.js +20 -0
  415. package/dist/esm/utils/base64FromBytes.d.ts +2 -0
  416. package/dist/esm/utils/base64FromBytes.d.ts.map +1 -0
  417. package/dist/esm/utils/base64FromBytes.js +12 -0
  418. package/dist/esm/utils/delay.d.ts +2 -0
  419. package/dist/esm/utils/delay.d.ts.map +1 -0
  420. package/dist/esm/utils/delay.js +5 -0
  421. package/dist/esm/utils/distributive-omit.d.ts +9 -0
  422. package/dist/esm/utils/distributive-omit.d.ts.map +1 -0
  423. package/dist/esm/utils/distributive-omit.js +6 -0
  424. package/dist/esm/utils/isBackend.d.ts +2 -0
  425. package/dist/esm/utils/isBackend.d.ts.map +1 -0
  426. package/dist/esm/utils/isBackend.js +3 -0
  427. package/dist/esm/utils/isFrontend.d.ts +2 -0
  428. package/dist/esm/utils/isFrontend.d.ts.map +1 -0
  429. package/dist/esm/utils/isFrontend.js +2 -0
  430. package/dist/esm/utils/omit.d.ts +5 -0
  431. package/dist/esm/utils/omit.d.ts.map +1 -0
  432. package/dist/esm/utils/omit.js +10 -0
  433. package/dist/esm/utils/pick.d.ts +5 -0
  434. package/dist/esm/utils/pick.d.ts.map +1 -0
  435. package/dist/esm/utils/pick.js +10 -0
  436. package/dist/esm/utils/request.d.ts +28 -0
  437. package/dist/esm/utils/request.d.ts.map +1 -0
  438. package/dist/esm/utils/request.js +112 -0
  439. package/dist/esm/utils/toArray.d.ts +2 -0
  440. package/dist/esm/utils/toArray.d.ts.map +1 -0
  441. package/dist/esm/utils/toArray.js +6 -0
  442. package/dist/esm/utils/typedEntries.d.ts +4 -0
  443. package/dist/esm/utils/typedEntries.d.ts.map +1 -0
  444. package/dist/esm/utils/typedEntries.js +3 -0
  445. package/dist/esm/utils/typedInclude.d.ts +2 -0
  446. package/dist/esm/utils/typedInclude.d.ts.map +1 -0
  447. package/dist/esm/utils/typedInclude.js +3 -0
  448. package/dist/esm/vendor/fetch-event-source/parse.d.ts +69 -0
  449. package/dist/esm/vendor/fetch-event-source/parse.d.ts.map +1 -0
  450. package/dist/esm/vendor/fetch-event-source/parse.js +180 -0
  451. package/dist/esm/vendor/fetch-event-source/parse.spec.d.ts +2 -0
  452. package/dist/esm/vendor/fetch-event-source/parse.spec.d.ts.map +1 -0
  453. package/dist/esm/vendor/fetch-event-source/parse.spec.js +335 -0
  454. package/package.json +26 -12
  455. package/src/InferenceClient.ts +4 -4
  456. package/src/index.ts +4 -4
  457. package/src/lib/getDefaultTask.ts +2 -2
  458. package/src/lib/getInferenceProviderMapping.ts +5 -5
  459. package/src/lib/getProviderHelper.ts +19 -19
  460. package/src/lib/makeRequestOptions.ts +8 -8
  461. package/src/package.ts +3 -0
  462. package/src/providers/black-forest-labs.ts +5 -5
  463. package/src/providers/cerebras.ts +1 -1
  464. package/src/providers/cohere.ts +1 -1
  465. package/src/providers/consts.ts +3 -3
  466. package/src/providers/fal-ai.ts +9 -9
  467. package/src/providers/featherless-ai.ts +3 -3
  468. package/src/providers/fireworks-ai.ts +1 -1
  469. package/src/providers/groq.ts +1 -1
  470. package/src/providers/hf-inference.ts +11 -11
  471. package/src/providers/hyperbolic.ts +4 -4
  472. package/src/providers/nebius.ts +4 -4
  473. package/src/providers/novita.ts +5 -5
  474. package/src/providers/nscale.ts +4 -4
  475. package/src/providers/openai.ts +1 -1
  476. package/src/providers/ovhcloud.ts +4 -4
  477. package/src/providers/providerHelper.ts +7 -7
  478. package/src/providers/replicate.ts +5 -5
  479. package/src/providers/sambanova.ts +4 -4
  480. package/src/providers/together.ts +4 -4
  481. package/src/snippets/getInferenceSnippets.ts +7 -7
  482. package/src/tasks/audio/audioClassification.ts +6 -6
  483. package/src/tasks/audio/audioToAudio.ts +6 -6
  484. package/src/tasks/audio/automaticSpeechRecognition.ts +6 -6
  485. package/src/tasks/audio/textToSpeech.ts +4 -4
  486. package/src/tasks/audio/utils.ts +2 -2
  487. package/src/tasks/custom/request.ts +4 -4
  488. package/src/tasks/custom/streamingRequest.ts +4 -4
  489. package/src/tasks/cv/imageClassification.ts +5 -5
  490. package/src/tasks/cv/imageSegmentation.ts +5 -5
  491. package/src/tasks/cv/imageToImage.ts +4 -4
  492. package/src/tasks/cv/imageToText.ts +6 -6
  493. package/src/tasks/cv/objectDetection.ts +5 -5
  494. package/src/tasks/cv/textToImage.ts +5 -5
  495. package/src/tasks/cv/textToVideo.ts +8 -8
  496. package/src/tasks/cv/utils.ts +2 -2
  497. package/src/tasks/cv/zeroShotImageClassification.ts +5 -5
  498. package/src/tasks/index.ts +32 -32
  499. package/src/tasks/multimodal/documentQuestionAnswering.ts +5 -5
  500. package/src/tasks/multimodal/visualQuestionAnswering.ts +5 -5
  501. package/src/tasks/nlp/chatCompletion.ts +4 -4
  502. package/src/tasks/nlp/chatCompletionStream.ts +4 -4
  503. package/src/tasks/nlp/featureExtraction.ts +4 -4
  504. package/src/tasks/nlp/fillMask.ts +4 -4
  505. package/src/tasks/nlp/questionAnswering.ts +4 -4
  506. package/src/tasks/nlp/sentenceSimilarity.ts +4 -4
  507. package/src/tasks/nlp/summarization.ts +4 -4
  508. package/src/tasks/nlp/tableQuestionAnswering.ts +4 -4
  509. package/src/tasks/nlp/textClassification.ts +4 -4
  510. package/src/tasks/nlp/textGeneration.ts +5 -5
  511. package/src/tasks/nlp/textGenerationStream.ts +4 -4
  512. package/src/tasks/nlp/tokenClassification.ts +4 -4
  513. package/src/tasks/nlp/translation.ts +4 -4
  514. package/src/tasks/nlp/zeroShotClassification.ts +4 -4
  515. package/src/tasks/tabular/tabularClassification.ts +4 -4
  516. package/src/tasks/tabular/tabularRegression.ts +4 -4
  517. package/src/types.ts +1 -1
  518. package/src/utils/isFrontend.ts +1 -1
  519. package/src/utils/omit.ts +2 -2
  520. package/src/utils/request.ts +5 -5
  521. package/src/vendor/fetch-event-source/parse.spec.ts +1 -1
  522. package/src/vendor/fetch-event-source/parse.ts +6 -5
  523. package/dist/index.cjs +0 -2868
  524. package/dist/index.js +0 -2812
  525. package/dist/src/InferenceClient.d.ts.map +0 -1
  526. package/dist/src/index.d.ts.map +0 -1
  527. package/dist/src/providers/consts.d.ts.map +0 -1
  528. package/dist/src/providers/fireworks-ai.d.ts.map +0 -1
  529. package/dist/src/tasks/audio/audioToAudio.d.ts.map +0 -1
  530. package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +0 -1
  531. package/dist/src/tasks/audio/textToSpeech.d.ts.map +0 -1
  532. package/dist/src/tasks/audio/utils.d.ts.map +0 -1
  533. package/dist/src/tasks/custom/request.d.ts.map +0 -1
  534. package/dist/src/tasks/custom/streamingRequest.d.ts.map +0 -1
  535. package/dist/src/tasks/cv/imageClassification.d.ts.map +0 -1
  536. package/dist/src/tasks/cv/imageSegmentation.d.ts.map +0 -1
  537. package/dist/src/tasks/cv/imageToImage.d.ts.map +0 -1
  538. package/dist/src/tasks/cv/imageToText.d.ts.map +0 -1
  539. package/dist/src/tasks/cv/objectDetection.d.ts.map +0 -1
  540. package/dist/src/tasks/cv/textToImage.d.ts.map +0 -1
  541. package/dist/src/tasks/cv/textToVideo.d.ts.map +0 -1
  542. package/dist/src/tasks/cv/utils.d.ts.map +0 -1
  543. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +0 -1
  544. package/dist/src/tasks/index.d.ts +0 -33
  545. package/dist/src/tasks/index.d.ts.map +0 -1
  546. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +0 -1
  547. package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +0 -1
  548. package/dist/src/tasks/nlp/featureExtraction.d.ts.map +0 -1
  549. package/dist/src/tasks/nlp/fillMask.d.ts.map +0 -1
  550. package/dist/src/tasks/nlp/questionAnswering.d.ts.map +0 -1
  551. package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +0 -1
  552. package/dist/src/tasks/nlp/summarization.d.ts.map +0 -1
  553. package/dist/src/tasks/nlp/textClassification.d.ts.map +0 -1
  554. package/dist/src/tasks/nlp/textGeneration.d.ts.map +0 -1
  555. package/dist/src/tasks/nlp/textGenerationStream.d.ts.map +0 -1
  556. package/dist/src/tasks/nlp/translation.d.ts.map +0 -1
  557. package/dist/src/tasks/tabular/tabularClassification.d.ts.map +0 -1
  558. package/dist/src/tasks/tabular/tabularRegression.d.ts.map +0 -1
  559. package/dist/src/vendor/fetch-event-source/parse.d.ts.map +0 -1
  560. package/dist/test/InferenceClient.spec.d.ts +0 -2
  561. package/dist/test/InferenceClient.spec.d.ts.map +0 -1
  562. package/dist/test/expect-closeto.d.ts +0 -2
  563. package/dist/test/expect-closeto.d.ts.map +0 -1
  564. package/dist/test/test-files.d.ts +0 -2
  565. package/dist/test/test-files.d.ts.map +0 -1
  566. /package/dist/{src → commonjs}/config.d.ts +0 -0
  567. /package/dist/{src → commonjs}/config.d.ts.map +0 -0
  568. /package/dist/{src → commonjs}/lib/InferenceOutputError.d.ts +0 -0
  569. /package/dist/{src → commonjs}/lib/InferenceOutputError.d.ts.map +0 -0
  570. /package/dist/{src → commonjs}/lib/getDefaultTask.d.ts +0 -0
  571. /package/dist/{src → commonjs}/lib/getDefaultTask.d.ts.map +0 -0
  572. /package/dist/{src → commonjs}/lib/isUrl.d.ts +0 -0
  573. /package/dist/{src → commonjs}/lib/isUrl.d.ts.map +0 -0
  574. /package/dist/{src → commonjs}/snippets/index.d.ts +0 -0
  575. /package/dist/{src → commonjs}/snippets/index.d.ts.map +0 -0
  576. /package/dist/{src → commonjs}/snippets/templates.exported.d.ts +0 -0
  577. /package/dist/{src → commonjs}/snippets/templates.exported.d.ts.map +0 -0
  578. /package/dist/{src → commonjs}/utils/base64FromBytes.d.ts +0 -0
  579. /package/dist/{src → commonjs}/utils/base64FromBytes.d.ts.map +0 -0
  580. /package/dist/{src → commonjs}/utils/delay.d.ts +0 -0
  581. /package/dist/{src → commonjs}/utils/delay.d.ts.map +0 -0
  582. /package/dist/{src → commonjs}/utils/distributive-omit.d.ts +0 -0
  583. /package/dist/{src → commonjs}/utils/distributive-omit.d.ts.map +0 -0
  584. /package/dist/{src → commonjs}/utils/isBackend.d.ts +0 -0
  585. /package/dist/{src → commonjs}/utils/isBackend.d.ts.map +0 -0
  586. /package/dist/{src → commonjs}/utils/isFrontend.d.ts +0 -0
  587. /package/dist/{src → commonjs}/utils/isFrontend.d.ts.map +0 -0
  588. /package/dist/{src → commonjs}/utils/omit.d.ts +0 -0
  589. /package/dist/{src → commonjs}/utils/omit.d.ts.map +0 -0
  590. /package/dist/{src → commonjs}/utils/pick.d.ts +0 -0
  591. /package/dist/{src → commonjs}/utils/pick.d.ts.map +0 -0
  592. /package/dist/{src → commonjs}/utils/toArray.d.ts +0 -0
  593. /package/dist/{src → commonjs}/utils/toArray.d.ts.map +0 -0
  594. /package/dist/{src → commonjs}/utils/typedEntries.d.ts +0 -0
  595. /package/dist/{src → commonjs}/utils/typedEntries.d.ts.map +0 -0
  596. /package/dist/{src → commonjs}/utils/typedInclude.d.ts +0 -0
  597. /package/dist/{src → commonjs}/utils/typedInclude.d.ts.map +0 -0
  598. /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.d.ts +0 -0
  599. /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.spec.d.ts +0 -0
  600. /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.spec.d.ts.map +0 -0
package/dist/index.js DELETED
@@ -1,2812 +0,0 @@
1
- var __defProp = Object.defineProperty;
2
- var __export = (target, all) => {
3
- for (var name2 in all)
4
- __defProp(target, name2, { get: all[name2], enumerable: true });
5
- };
6
-
7
- // src/tasks/index.ts
8
- var tasks_exports = {};
9
- __export(tasks_exports, {
10
- audioClassification: () => audioClassification,
11
- audioToAudio: () => audioToAudio,
12
- automaticSpeechRecognition: () => automaticSpeechRecognition,
13
- chatCompletion: () => chatCompletion,
14
- chatCompletionStream: () => chatCompletionStream,
15
- documentQuestionAnswering: () => documentQuestionAnswering,
16
- featureExtraction: () => featureExtraction,
17
- fillMask: () => fillMask,
18
- imageClassification: () => imageClassification,
19
- imageSegmentation: () => imageSegmentation,
20
- imageToImage: () => imageToImage,
21
- imageToText: () => imageToText,
22
- objectDetection: () => objectDetection,
23
- questionAnswering: () => questionAnswering,
24
- request: () => request,
25
- sentenceSimilarity: () => sentenceSimilarity,
26
- streamingRequest: () => streamingRequest,
27
- summarization: () => summarization,
28
- tableQuestionAnswering: () => tableQuestionAnswering,
29
- tabularClassification: () => tabularClassification,
30
- tabularRegression: () => tabularRegression,
31
- textClassification: () => textClassification,
32
- textGeneration: () => textGeneration,
33
- textGenerationStream: () => textGenerationStream,
34
- textToImage: () => textToImage,
35
- textToSpeech: () => textToSpeech,
36
- textToVideo: () => textToVideo,
37
- tokenClassification: () => tokenClassification,
38
- translation: () => translation,
39
- visualQuestionAnswering: () => visualQuestionAnswering,
40
- zeroShotClassification: () => zeroShotClassification,
41
- zeroShotImageClassification: () => zeroShotImageClassification
42
- });
43
-
44
- // src/config.ts
45
- var HF_HUB_URL = "https://huggingface.co";
46
- var HF_ROUTER_URL = "https://router.huggingface.co";
47
- var HF_HEADER_X_BILL_TO = "X-HF-Bill-To";
48
-
49
- // src/providers/consts.ts
50
- var HARDCODED_MODEL_INFERENCE_MAPPING = {
51
- /**
52
- * "HF model ID" => "Model ID on Inference Provider's side"
53
- *
54
- * Example:
55
- * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
56
- */
57
- "black-forest-labs": {},
58
- cerebras: {},
59
- cohere: {},
60
- "fal-ai": {},
61
- "featherless-ai": {},
62
- "fireworks-ai": {},
63
- groq: {},
64
- "hf-inference": {},
65
- hyperbolic: {},
66
- nebius: {},
67
- novita: {},
68
- nscale: {},
69
- openai: {},
70
- ovhcloud: {},
71
- replicate: {},
72
- sambanova: {},
73
- together: {}
74
- };
75
-
76
- // src/lib/InferenceOutputError.ts
77
- var InferenceOutputError = class extends TypeError {
78
- constructor(message) {
79
- super(
80
- `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
81
- );
82
- this.name = "InferenceOutputError";
83
- }
84
- };
85
-
86
- // src/utils/toArray.ts
87
- function toArray(obj) {
88
- if (Array.isArray(obj)) {
89
- return obj;
90
- }
91
- return [obj];
92
- }
93
-
94
- // src/providers/providerHelper.ts
95
- var TaskProviderHelper = class {
96
- constructor(provider, baseUrl, clientSideRoutingOnly = false) {
97
- this.provider = provider;
98
- this.baseUrl = baseUrl;
99
- this.clientSideRoutingOnly = clientSideRoutingOnly;
100
- }
101
- /**
102
- * Prepare the base URL for the request
103
- */
104
- makeBaseUrl(params) {
105
- return params.authMethod !== "provider-key" ? `${HF_ROUTER_URL}/${this.provider}` : this.baseUrl;
106
- }
107
- /**
108
- * Prepare the body for the request
109
- */
110
- makeBody(params) {
111
- if ("data" in params.args && !!params.args.data) {
112
- return params.args.data;
113
- }
114
- return JSON.stringify(this.preparePayload(params));
115
- }
116
- /**
117
- * Prepare the URL for the request
118
- */
119
- makeUrl(params) {
120
- const baseUrl = this.makeBaseUrl(params);
121
- const route = this.makeRoute(params).replace(/^\/+/, "");
122
- return `${baseUrl}/${route}`;
123
- }
124
- /**
125
- * Prepare the headers for the request
126
- */
127
- prepareHeaders(params, isBinary) {
128
- const headers = { Authorization: `Bearer ${params.accessToken}` };
129
- if (!isBinary) {
130
- headers["Content-Type"] = "application/json";
131
- }
132
- return headers;
133
- }
134
- };
135
- var BaseConversationalTask = class extends TaskProviderHelper {
136
- constructor(provider, baseUrl, clientSideRoutingOnly = false) {
137
- super(provider, baseUrl, clientSideRoutingOnly);
138
- }
139
- makeRoute() {
140
- return "v1/chat/completions";
141
- }
142
- preparePayload(params) {
143
- return {
144
- ...params.args,
145
- model: params.model
146
- };
147
- }
148
- async getResponse(response) {
149
- if (typeof response === "object" && Array.isArray(response?.choices) && typeof response?.created === "number" && typeof response?.id === "string" && typeof response?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
150
- (response.system_fingerprint === void 0 || response.system_fingerprint === null || typeof response.system_fingerprint === "string") && typeof response?.usage === "object") {
151
- return response;
152
- }
153
- throw new InferenceOutputError("Expected ChatCompletionOutput");
154
- }
155
- };
156
- var BaseTextGenerationTask = class extends TaskProviderHelper {
157
- constructor(provider, baseUrl, clientSideRoutingOnly = false) {
158
- super(provider, baseUrl, clientSideRoutingOnly);
159
- }
160
- preparePayload(params) {
161
- return {
162
- ...params.args,
163
- model: params.model
164
- };
165
- }
166
- makeRoute() {
167
- return "v1/completions";
168
- }
169
- async getResponse(response) {
170
- const res = toArray(response);
171
- if (Array.isArray(res) && res.length > 0 && res.every(
172
- (x) => typeof x === "object" && !!x && "generated_text" in x && typeof x.generated_text === "string"
173
- )) {
174
- return res[0];
175
- }
176
- throw new InferenceOutputError("Expected Array<{generated_text: string}>");
177
- }
178
- };
179
-
180
- // src/utils/base64FromBytes.ts
181
- function base64FromBytes(arr) {
182
- if (globalThis.Buffer) {
183
- return globalThis.Buffer.from(arr).toString("base64");
184
- } else {
185
- const bin = [];
186
- arr.forEach((byte) => {
187
- bin.push(String.fromCharCode(byte));
188
- });
189
- return globalThis.btoa(bin.join(""));
190
- }
191
- }
192
-
193
- // src/utils/pick.ts
194
- function pick(o, props) {
195
- return Object.assign(
196
- {},
197
- ...props.map((prop) => {
198
- if (o[prop] !== void 0) {
199
- return { [prop]: o[prop] };
200
- }
201
- })
202
- );
203
- }
204
-
205
- // src/utils/typedInclude.ts
206
- function typedInclude(arr, v) {
207
- return arr.includes(v);
208
- }
209
-
210
- // src/utils/omit.ts
211
- function omit(o, props) {
212
- const propsArr = Array.isArray(props) ? props : [props];
213
- const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
214
- return pick(o, letsKeep);
215
- }
216
-
217
- // src/providers/hf-inference.ts
218
- var EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS = ["feature-extraction", "sentence-similarity"];
219
- var HFInferenceTask = class extends TaskProviderHelper {
220
- constructor() {
221
- super("hf-inference", `${HF_ROUTER_URL}/hf-inference`);
222
- }
223
- preparePayload(params) {
224
- return params.args;
225
- }
226
- makeUrl(params) {
227
- if (params.model.startsWith("http://") || params.model.startsWith("https://")) {
228
- return params.model;
229
- }
230
- return super.makeUrl(params);
231
- }
232
- makeRoute(params) {
233
- if (params.task && ["feature-extraction", "sentence-similarity"].includes(params.task)) {
234
- return `models/${params.model}/pipeline/${params.task}`;
235
- }
236
- return `models/${params.model}`;
237
- }
238
- async getResponse(response) {
239
- return response;
240
- }
241
- };
242
- var HFInferenceTextToImageTask = class extends HFInferenceTask {
243
- async getResponse(response, url, headers, outputType) {
244
- if (!response) {
245
- throw new InferenceOutputError("response is undefined");
246
- }
247
- if (typeof response == "object") {
248
- if ("data" in response && Array.isArray(response.data) && response.data[0].b64_json) {
249
- const base64Data = response.data[0].b64_json;
250
- if (outputType === "url") {
251
- return `data:image/jpeg;base64,${base64Data}`;
252
- }
253
- const base64Response = await fetch(`data:image/jpeg;base64,${base64Data}`);
254
- return await base64Response.blob();
255
- }
256
- if ("output" in response && Array.isArray(response.output)) {
257
- if (outputType === "url") {
258
- return response.output[0];
259
- }
260
- const urlResponse = await fetch(response.output[0]);
261
- const blob = await urlResponse.blob();
262
- return blob;
263
- }
264
- }
265
- if (response instanceof Blob) {
266
- if (outputType === "url") {
267
- const b64 = await response.arrayBuffer().then((buf) => Buffer.from(buf).toString("base64"));
268
- return `data:image/jpeg;base64,${b64}`;
269
- }
270
- return response;
271
- }
272
- throw new InferenceOutputError("Expected a Blob ");
273
- }
274
- };
275
- var HFInferenceConversationalTask = class extends HFInferenceTask {
276
- makeUrl(params) {
277
- let url;
278
- if (params.model.startsWith("http://") || params.model.startsWith("https://")) {
279
- url = params.model.trim();
280
- } else {
281
- url = `${this.makeBaseUrl(params)}/models/${params.model}`;
282
- }
283
- url = url.replace(/\/+$/, "");
284
- if (url.endsWith("/v1")) {
285
- url += "/chat/completions";
286
- } else if (!url.endsWith("/chat/completions")) {
287
- url += "/v1/chat/completions";
288
- }
289
- return url;
290
- }
291
- preparePayload(params) {
292
- return {
293
- ...params.args,
294
- model: params.model
295
- };
296
- }
297
- async getResponse(response) {
298
- return response;
299
- }
300
- };
301
- var HFInferenceTextGenerationTask = class extends HFInferenceTask {
302
- async getResponse(response) {
303
- const res = toArray(response);
304
- if (Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string")) {
305
- return res?.[0];
306
- }
307
- throw new InferenceOutputError("Expected Array<{generated_text: string}>");
308
- }
309
- };
310
- var HFInferenceAudioClassificationTask = class extends HFInferenceTask {
311
- async getResponse(response) {
312
- if (Array.isArray(response) && response.every(
313
- (x) => typeof x === "object" && x !== null && typeof x.label === "string" && typeof x.score === "number"
314
- )) {
315
- return response;
316
- }
317
- throw new InferenceOutputError("Expected Array<{label: string, score: number}> but received different format");
318
- }
319
- };
320
- var HFInferenceAutomaticSpeechRecognitionTask = class extends HFInferenceTask {
321
- async getResponse(response) {
322
- return response;
323
- }
324
- async preparePayloadAsync(args) {
325
- return "data" in args ? args : {
326
- ...omit(args, "inputs"),
327
- data: args.inputs
328
- };
329
- }
330
- };
331
- var HFInferenceAudioToAudioTask = class extends HFInferenceTask {
332
- async getResponse(response) {
333
- if (!Array.isArray(response)) {
334
- throw new InferenceOutputError("Expected Array");
335
- }
336
- if (!response.every((elem) => {
337
- return typeof elem === "object" && elem && "label" in elem && typeof elem.label === "string" && "content-type" in elem && typeof elem["content-type"] === "string" && "blob" in elem && typeof elem.blob === "string";
338
- })) {
339
- throw new InferenceOutputError("Expected Array<{label: string, audio: Blob}>");
340
- }
341
- return response;
342
- }
343
- };
344
- var HFInferenceDocumentQuestionAnsweringTask = class extends HFInferenceTask {
345
- async getResponse(response) {
346
- if (Array.isArray(response) && response.every(
347
- (elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
348
- )) {
349
- return response[0];
350
- }
351
- throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
352
- }
353
- };
354
- var HFInferenceFeatureExtractionTask = class extends HFInferenceTask {
355
- async getResponse(response) {
356
- const isNumArrayRec = (arr, maxDepth, curDepth = 0) => {
357
- if (curDepth > maxDepth)
358
- return false;
359
- if (arr.every((x) => Array.isArray(x))) {
360
- return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1));
361
- } else {
362
- return arr.every((x) => typeof x === "number");
363
- }
364
- };
365
- if (Array.isArray(response) && isNumArrayRec(response, 3, 0)) {
366
- return response;
367
- }
368
- throw new InferenceOutputError("Expected Array<number[][][] | number[][] | number[] | number>");
369
- }
370
- };
371
- var HFInferenceImageClassificationTask = class extends HFInferenceTask {
372
- async getResponse(response) {
373
- if (Array.isArray(response) && response.every((x) => typeof x.label === "string" && typeof x.score === "number")) {
374
- return response;
375
- }
376
- throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
377
- }
378
- };
379
- var HFInferenceImageSegmentationTask = class extends HFInferenceTask {
380
- async getResponse(response) {
381
- if (Array.isArray(response) && response.every(
382
- (x) => typeof x.label === "string" && typeof x.mask === "string" && (x.score === void 0 || typeof x.score === "number")
383
- )) {
384
- return response;
385
- }
386
- throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>");
387
- }
388
- };
389
- var HFInferenceImageToTextTask = class extends HFInferenceTask {
390
- async getResponse(response) {
391
- if (typeof response?.generated_text !== "string") {
392
- throw new InferenceOutputError("Expected {generated_text: string}");
393
- }
394
- return response;
395
- }
396
- };
397
- var HFInferenceImageToImageTask = class extends HFInferenceTask {
398
- async preparePayloadAsync(args) {
399
- if (!args.parameters) {
400
- return {
401
- ...args,
402
- model: args.model,
403
- data: args.inputs
404
- };
405
- } else {
406
- return {
407
- ...args,
408
- inputs: base64FromBytes(
409
- new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
410
- )
411
- };
412
- }
413
- }
414
- async getResponse(response) {
415
- if (response instanceof Blob) {
416
- return response;
417
- }
418
- throw new InferenceOutputError("Expected Blob");
419
- }
420
- };
421
- var HFInferenceObjectDetectionTask = class extends HFInferenceTask {
422
- async getResponse(response) {
423
- if (Array.isArray(response) && response.every(
424
- (x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number"
425
- )) {
426
- return response;
427
- }
428
- throw new InferenceOutputError(
429
- "Expected Array<{label: string, score: number, box: {xmin: number, ymin: number, xmax: number, ymax: number}}>"
430
- );
431
- }
432
- };
433
- var HFInferenceZeroShotImageClassificationTask = class extends HFInferenceTask {
434
- async getResponse(response) {
435
- if (Array.isArray(response) && response.every((x) => typeof x.label === "string" && typeof x.score === "number")) {
436
- return response;
437
- }
438
- throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
439
- }
440
- };
441
- var HFInferenceTextClassificationTask = class extends HFInferenceTask {
442
- async getResponse(response) {
443
- const output = response?.[0];
444
- if (Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number")) {
445
- return output;
446
- }
447
- throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
448
- }
449
- };
450
- var HFInferenceQuestionAnsweringTask = class extends HFInferenceTask {
451
- async getResponse(response) {
452
- if (Array.isArray(response) ? response.every(
453
- (elem) => typeof elem === "object" && !!elem && typeof elem.answer === "string" && typeof elem.end === "number" && typeof elem.score === "number" && typeof elem.start === "number"
454
- ) : typeof response === "object" && !!response && typeof response.answer === "string" && typeof response.end === "number" && typeof response.score === "number" && typeof response.start === "number") {
455
- return Array.isArray(response) ? response[0] : response;
456
- }
457
- throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
458
- }
459
- };
460
- var HFInferenceFillMaskTask = class extends HFInferenceTask {
461
- async getResponse(response) {
462
- if (Array.isArray(response) && response.every(
463
- (x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string"
464
- )) {
465
- return response;
466
- }
467
- throw new InferenceOutputError(
468
- "Expected Array<{score: number, sequence: string, token: number, token_str: string}>"
469
- );
470
- }
471
- };
472
- var HFInferenceZeroShotClassificationTask = class extends HFInferenceTask {
473
- async getResponse(response) {
474
- if (Array.isArray(response) && response.every(
475
- (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
476
- )) {
477
- return response;
478
- }
479
- throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
480
- }
481
- };
482
- var HFInferenceSentenceSimilarityTask = class extends HFInferenceTask {
483
- async getResponse(response) {
484
- if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
485
- return response;
486
- }
487
- throw new InferenceOutputError("Expected Array<number>");
488
- }
489
- };
490
- var HFInferenceTableQuestionAnsweringTask = class extends HFInferenceTask {
491
- static validate(elem) {
492
- return typeof elem === "object" && !!elem && "aggregator" in elem && typeof elem.aggregator === "string" && "answer" in elem && typeof elem.answer === "string" && "cells" in elem && Array.isArray(elem.cells) && elem.cells.every((x) => typeof x === "string") && "coordinates" in elem && Array.isArray(elem.coordinates) && elem.coordinates.every(
493
- (coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")
494
- );
495
- }
496
- async getResponse(response) {
497
- if (Array.isArray(response) && Array.isArray(response) ? response.every((elem) => HFInferenceTableQuestionAnsweringTask.validate(elem)) : HFInferenceTableQuestionAnsweringTask.validate(response)) {
498
- return Array.isArray(response) ? response[0] : response;
499
- }
500
- throw new InferenceOutputError(
501
- "Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}"
502
- );
503
- }
504
- };
505
- var HFInferenceTokenClassificationTask = class extends HFInferenceTask {
506
- async getResponse(response) {
507
- if (Array.isArray(response) && response.every(
508
- (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
509
- )) {
510
- return response;
511
- }
512
- throw new InferenceOutputError(
513
- "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
514
- );
515
- }
516
- };
517
- var HFInferenceTranslationTask = class extends HFInferenceTask {
518
- async getResponse(response) {
519
- if (Array.isArray(response) && response.every((x) => typeof x?.translation_text === "string")) {
520
- return response?.length === 1 ? response?.[0] : response;
521
- }
522
- throw new InferenceOutputError("Expected Array<{translation_text: string}>");
523
- }
524
- };
525
- var HFInferenceSummarizationTask = class extends HFInferenceTask {
526
- async getResponse(response) {
527
- if (Array.isArray(response) && response.every((x) => typeof x?.summary_text === "string")) {
528
- return response?.[0];
529
- }
530
- throw new InferenceOutputError("Expected Array<{summary_text: string}>");
531
- }
532
- };
533
- var HFInferenceTextToSpeechTask = class extends HFInferenceTask {
534
- async getResponse(response) {
535
- return response;
536
- }
537
- };
538
- var HFInferenceTabularClassificationTask = class extends HFInferenceTask {
539
- async getResponse(response) {
540
- if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
541
- return response;
542
- }
543
- throw new InferenceOutputError("Expected Array<number>");
544
- }
545
- };
546
- var HFInferenceVisualQuestionAnsweringTask = class extends HFInferenceTask {
547
- async getResponse(response) {
548
- if (Array.isArray(response) && response.every(
549
- (elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && typeof elem.score === "number"
550
- )) {
551
- return response[0];
552
- }
553
- throw new InferenceOutputError("Expected Array<{answer: string, score: number}>");
554
- }
555
- };
556
- var HFInferenceTabularRegressionTask = class extends HFInferenceTask {
557
- async getResponse(response) {
558
- if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
559
- return response;
560
- }
561
- throw new InferenceOutputError("Expected Array<number>");
562
- }
563
- };
564
- var HFInferenceTextToAudioTask = class extends HFInferenceTask {
565
- async getResponse(response) {
566
- return response;
567
- }
568
- };
569
-
570
- // src/lib/getInferenceProviderMapping.ts
571
- var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
572
- async function fetchInferenceProviderMappingForModel(modelId, accessToken, options) {
573
- let inferenceProviderMapping;
574
- if (inferenceProviderMappingCache.has(modelId)) {
575
- inferenceProviderMapping = inferenceProviderMappingCache.get(modelId);
576
- } else {
577
- const resp = await (options?.fetch ?? fetch)(
578
- `${HF_HUB_URL}/api/models/${modelId}?expand[]=inferenceProviderMapping`,
579
- {
580
- headers: accessToken?.startsWith("hf_") ? { Authorization: `Bearer ${accessToken}` } : {}
581
- }
582
- );
583
- if (resp.status === 404) {
584
- throw new Error(`Model ${modelId} does not exist`);
585
- }
586
- inferenceProviderMapping = await resp.json().then((json) => json.inferenceProviderMapping).catch(() => null);
587
- if (inferenceProviderMapping) {
588
- inferenceProviderMappingCache.set(modelId, inferenceProviderMapping);
589
- }
590
- }
591
- if (!inferenceProviderMapping) {
592
- throw new Error(`We have not been able to find inference provider information for model ${modelId}.`);
593
- }
594
- return inferenceProviderMapping;
595
- }
596
- async function getInferenceProviderMapping(params, options) {
597
- if (HARDCODED_MODEL_INFERENCE_MAPPING[params.provider][params.modelId]) {
598
- return HARDCODED_MODEL_INFERENCE_MAPPING[params.provider][params.modelId];
599
- }
600
- const inferenceProviderMapping = await fetchInferenceProviderMappingForModel(
601
- params.modelId,
602
- params.accessToken,
603
- options
604
- );
605
- const providerMapping = inferenceProviderMapping[params.provider];
606
- if (providerMapping) {
607
- const equivalentTasks = params.provider === "hf-inference" && typedInclude(EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS, params.task) ? EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS : [params.task];
608
- if (!typedInclude(equivalentTasks, providerMapping.task)) {
609
- throw new Error(
610
- `Model ${params.modelId} is not supported for task ${params.task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
611
- );
612
- }
613
- if (providerMapping.status === "staging") {
614
- console.warn(
615
- `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
616
- );
617
- }
618
- return { ...providerMapping, hfModelId: params.modelId };
619
- }
620
- return null;
621
- }
622
- async function resolveProvider(provider, modelId, endpointUrl) {
623
- if (endpointUrl) {
624
- if (provider) {
625
- throw new Error("Specifying both endpointUrl and provider is not supported.");
626
- }
627
- return "hf-inference";
628
- }
629
- if (!provider) {
630
- console.log(
631
- "Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers."
632
- );
633
- provider = "auto";
634
- }
635
- if (provider === "auto") {
636
- if (!modelId) {
637
- throw new Error("Specifying a model is required when provider is 'auto'");
638
- }
639
- const inferenceProviderMapping = await fetchInferenceProviderMappingForModel(modelId);
640
- provider = Object.keys(inferenceProviderMapping)[0];
641
- }
642
- if (!provider) {
643
- throw new Error(`No Inference Provider available for model ${modelId}.`);
644
- }
645
- return provider;
646
- }
647
-
648
- // src/utils/delay.ts
649
- function delay(ms) {
650
- return new Promise((resolve) => {
651
- setTimeout(() => resolve(), ms);
652
- });
653
- }
654
-
655
- // src/providers/black-forest-labs.ts
656
- var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
657
- var BlackForestLabsTextToImageTask = class extends TaskProviderHelper {
658
- constructor() {
659
- super("black-forest-labs", BLACK_FOREST_LABS_AI_API_BASE_URL);
660
- }
661
- preparePayload(params) {
662
- return {
663
- ...omit(params.args, ["inputs", "parameters"]),
664
- ...params.args.parameters,
665
- prompt: params.args.inputs
666
- };
667
- }
668
- prepareHeaders(params, binary) {
669
- const headers = {
670
- Authorization: params.authMethod !== "provider-key" ? `Bearer ${params.accessToken}` : `X-Key ${params.accessToken}`
671
- };
672
- if (!binary) {
673
- headers["Content-Type"] = "application/json";
674
- }
675
- return headers;
676
- }
677
- makeRoute(params) {
678
- if (!params) {
679
- throw new Error("Params are required");
680
- }
681
- return `/v1/${params.model}`;
682
- }
683
- async getResponse(response, url, headers, outputType) {
684
- const urlObj = new URL(response.polling_url);
685
- for (let step = 0; step < 5; step++) {
686
- await delay(1e3);
687
- console.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
688
- urlObj.searchParams.set("attempt", step.toString(10));
689
- const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
690
- if (!resp.ok) {
691
- throw new InferenceOutputError("Failed to fetch result from black forest labs API");
692
- }
693
- const payload = await resp.json();
694
- if (typeof payload === "object" && payload && "status" in payload && typeof payload.status === "string" && payload.status === "Ready" && "result" in payload && typeof payload.result === "object" && payload.result && "sample" in payload.result && typeof payload.result.sample === "string") {
695
- if (outputType === "url") {
696
- return payload.result.sample;
697
- }
698
- const image = await fetch(payload.result.sample);
699
- return await image.blob();
700
- }
701
- }
702
- throw new InferenceOutputError("Failed to fetch result from black forest labs API");
703
- }
704
- };
705
-
706
- // src/providers/cerebras.ts
707
- var CerebrasConversationalTask = class extends BaseConversationalTask {
708
- constructor() {
709
- super("cerebras", "https://api.cerebras.ai");
710
- }
711
- };
712
-
713
- // src/providers/cohere.ts
714
- var CohereConversationalTask = class extends BaseConversationalTask {
715
- constructor() {
716
- super("cohere", "https://api.cohere.com");
717
- }
718
- makeRoute() {
719
- return "/compatibility/v1/chat/completions";
720
- }
721
- };
722
-
723
- // src/lib/isUrl.ts
724
- function isUrl(modelOrUrl) {
725
- return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
726
- }
727
-
728
- // src/providers/fal-ai.ts
729
- var FAL_AI_SUPPORTED_BLOB_TYPES = ["audio/mpeg", "audio/mp4", "audio/wav", "audio/x-wav"];
730
- var FalAITask = class extends TaskProviderHelper {
731
- constructor(url) {
732
- super("fal-ai", url || "https://fal.run");
733
- }
734
- preparePayload(params) {
735
- return params.args;
736
- }
737
- makeRoute(params) {
738
- return `/${params.model}`;
739
- }
740
- prepareHeaders(params, binary) {
741
- const headers = {
742
- Authorization: params.authMethod !== "provider-key" ? `Bearer ${params.accessToken}` : `Key ${params.accessToken}`
743
- };
744
- if (!binary) {
745
- headers["Content-Type"] = "application/json";
746
- }
747
- return headers;
748
- }
749
- };
750
- function buildLoraPath(modelId, adapterWeightsPath) {
751
- return `${HF_HUB_URL}/${modelId}/resolve/main/${adapterWeightsPath}`;
752
- }
753
- var FalAITextToImageTask = class extends FalAITask {
754
- preparePayload(params) {
755
- const payload = {
756
- ...omit(params.args, ["inputs", "parameters"]),
757
- ...params.args.parameters,
758
- sync_mode: true,
759
- prompt: params.args.inputs
760
- };
761
- if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
762
- payload.loras = [
763
- {
764
- path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
765
- scale: 1
766
- }
767
- ];
768
- if (params.mapping.providerId === "fal-ai/lora") {
769
- payload.model_name = "stabilityai/stable-diffusion-xl-base-1.0";
770
- }
771
- }
772
- return payload;
773
- }
774
- async getResponse(response, outputType) {
775
- if (typeof response === "object" && "images" in response && Array.isArray(response.images) && response.images.length > 0 && "url" in response.images[0] && typeof response.images[0].url === "string") {
776
- if (outputType === "url") {
777
- return response.images[0].url;
778
- }
779
- const urlResponse = await fetch(response.images[0].url);
780
- return await urlResponse.blob();
781
- }
782
- throw new InferenceOutputError("Expected Fal.ai text-to-image response format");
783
- }
784
- };
785
- var FalAITextToVideoTask = class extends FalAITask {
786
- constructor() {
787
- super("https://queue.fal.run");
788
- }
789
- makeRoute(params) {
790
- if (params.authMethod !== "provider-key") {
791
- return `/${params.model}?_subdomain=queue`;
792
- }
793
- return `/${params.model}`;
794
- }
795
- preparePayload(params) {
796
- return {
797
- ...omit(params.args, ["inputs", "parameters"]),
798
- ...params.args.parameters,
799
- prompt: params.args.inputs
800
- };
801
- }
802
- async getResponse(response, url, headers) {
803
- if (!url || !headers) {
804
- throw new InferenceOutputError("URL and headers are required for text-to-video task");
805
- }
806
- const requestId = response.request_id;
807
- if (!requestId) {
808
- throw new InferenceOutputError("No request ID found in the response");
809
- }
810
- let status = response.status;
811
- const parsedUrl = new URL(url);
812
- const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/fal-ai" : ""}`;
813
- const modelId = new URL(response.response_url).pathname;
814
- const queryParams = parsedUrl.search;
815
- const statusUrl = `${baseUrl}${modelId}/status${queryParams}`;
816
- const resultUrl = `${baseUrl}${modelId}${queryParams}`;
817
- while (status !== "COMPLETED") {
818
- await delay(500);
819
- const statusResponse = await fetch(statusUrl, { headers });
820
- if (!statusResponse.ok) {
821
- throw new InferenceOutputError("Failed to fetch response status from fal-ai API");
822
- }
823
- try {
824
- status = (await statusResponse.json()).status;
825
- } catch (error) {
826
- throw new InferenceOutputError("Failed to parse status response from fal-ai API");
827
- }
828
- }
829
- const resultResponse = await fetch(resultUrl, { headers });
830
- let result;
831
- try {
832
- result = await resultResponse.json();
833
- } catch (error) {
834
- throw new InferenceOutputError("Failed to parse result response from fal-ai API");
835
- }
836
- if (typeof result === "object" && !!result && "video" in result && typeof result.video === "object" && !!result.video && "url" in result.video && typeof result.video.url === "string" && isUrl(result.video.url)) {
837
- const urlResponse = await fetch(result.video.url);
838
- return await urlResponse.blob();
839
- } else {
840
- throw new InferenceOutputError(
841
- "Expected { video: { url: string } } result format, got instead: " + JSON.stringify(result)
842
- );
843
- }
844
- }
845
- };
846
- var FalAIAutomaticSpeechRecognitionTask = class extends FalAITask {
847
- prepareHeaders(params, binary) {
848
- const headers = super.prepareHeaders(params, binary);
849
- headers["Content-Type"] = "application/json";
850
- return headers;
851
- }
852
- async getResponse(response) {
853
- const res = response;
854
- if (typeof res?.text !== "string") {
855
- throw new InferenceOutputError(
856
- `Expected { text: string } format from Fal.ai Automatic Speech Recognition, got: ${JSON.stringify(response)}`
857
- );
858
- }
859
- return { text: res.text };
860
- }
861
- async preparePayloadAsync(args) {
862
- const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
863
- const contentType = blob?.type;
864
- if (!contentType) {
865
- throw new Error(
866
- `Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
867
- );
868
- }
869
- if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
870
- throw new Error(
871
- `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
872
- ", "
873
- )}`
874
- );
875
- }
876
- const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
877
- return {
878
- ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
879
- audio_url: `data:${contentType};base64,${base64audio}`
880
- };
881
- }
882
- };
883
- var FalAITextToSpeechTask = class extends FalAITask {
884
- preparePayload(params) {
885
- return {
886
- ...omit(params.args, ["inputs", "parameters"]),
887
- ...params.args.parameters,
888
- text: params.args.inputs
889
- };
890
- }
891
- async getResponse(response) {
892
- const res = response;
893
- if (typeof res?.audio?.url !== "string") {
894
- throw new InferenceOutputError(
895
- `Expected { audio: { url: string } } format from Fal.ai Text-to-Speech, got: ${JSON.stringify(response)}`
896
- );
897
- }
898
- try {
899
- const urlResponse = await fetch(res.audio.url);
900
- if (!urlResponse.ok) {
901
- throw new Error(`Failed to fetch audio from ${res.audio.url}: ${urlResponse.statusText}`);
902
- }
903
- return await urlResponse.blob();
904
- } catch (error) {
905
- throw new InferenceOutputError(
906
- `Error fetching or processing audio from Fal.ai Text-to-Speech URL: ${res.audio.url}. ${error instanceof Error ? error.message : String(error)}`
907
- );
908
- }
909
- }
910
- };
911
-
912
- // src/providers/featherless-ai.ts
913
- var FEATHERLESS_API_BASE_URL = "https://api.featherless.ai";
914
- var FeatherlessAIConversationalTask = class extends BaseConversationalTask {
915
- constructor() {
916
- super("featherless-ai", FEATHERLESS_API_BASE_URL);
917
- }
918
- };
919
- var FeatherlessAITextGenerationTask = class extends BaseTextGenerationTask {
920
- constructor() {
921
- super("featherless-ai", FEATHERLESS_API_BASE_URL);
922
- }
923
- preparePayload(params) {
924
- return {
925
- ...params.args,
926
- ...params.args.parameters,
927
- model: params.model,
928
- prompt: params.args.inputs
929
- };
930
- }
931
- async getResponse(response) {
932
- if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
933
- const completion = response.choices[0];
934
- return {
935
- generated_text: completion.text
936
- };
937
- }
938
- throw new InferenceOutputError("Expected Featherless AI text generation response format");
939
- }
940
- };
941
-
942
- // src/providers/fireworks-ai.ts
943
- var FireworksConversationalTask = class extends BaseConversationalTask {
944
- constructor() {
945
- super("fireworks-ai", "https://api.fireworks.ai");
946
- }
947
- makeRoute() {
948
- return "/inference/v1/chat/completions";
949
- }
950
- };
951
-
952
- // src/providers/groq.ts
953
- var GROQ_API_BASE_URL = "https://api.groq.com";
954
- var GroqTextGenerationTask = class extends BaseTextGenerationTask {
955
- constructor() {
956
- super("groq", GROQ_API_BASE_URL);
957
- }
958
- makeRoute() {
959
- return "/openai/v1/chat/completions";
960
- }
961
- };
962
- var GroqConversationalTask = class extends BaseConversationalTask {
963
- constructor() {
964
- super("groq", GROQ_API_BASE_URL);
965
- }
966
- makeRoute() {
967
- return "/openai/v1/chat/completions";
968
- }
969
- };
970
-
971
- // src/providers/hyperbolic.ts
972
- var HYPERBOLIC_API_BASE_URL = "https://api.hyperbolic.xyz";
973
- var HyperbolicConversationalTask = class extends BaseConversationalTask {
974
- constructor() {
975
- super("hyperbolic", HYPERBOLIC_API_BASE_URL);
976
- }
977
- };
978
- var HyperbolicTextGenerationTask = class extends BaseTextGenerationTask {
979
- constructor() {
980
- super("hyperbolic", HYPERBOLIC_API_BASE_URL);
981
- }
982
- makeRoute() {
983
- return "v1/chat/completions";
984
- }
985
- preparePayload(params) {
986
- return {
987
- messages: [{ content: params.args.inputs, role: "user" }],
988
- ...params.args.parameters ? {
989
- max_tokens: params.args.parameters.max_new_tokens,
990
- ...omit(params.args.parameters, "max_new_tokens")
991
- } : void 0,
992
- ...omit(params.args, ["inputs", "parameters"]),
993
- model: params.model
994
- };
995
- }
996
- async getResponse(response) {
997
- if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
998
- const completion = response.choices[0];
999
- return {
1000
- generated_text: completion.message.content
1001
- };
1002
- }
1003
- throw new InferenceOutputError("Expected Hyperbolic text generation response format");
1004
- }
1005
- };
1006
- var HyperbolicTextToImageTask = class extends TaskProviderHelper {
1007
- constructor() {
1008
- super("hyperbolic", HYPERBOLIC_API_BASE_URL);
1009
- }
1010
- makeRoute(params) {
1011
- return `/v1/images/generations`;
1012
- }
1013
- preparePayload(params) {
1014
- return {
1015
- ...omit(params.args, ["inputs", "parameters"]),
1016
- ...params.args.parameters,
1017
- prompt: params.args.inputs,
1018
- model_name: params.model
1019
- };
1020
- }
1021
- async getResponse(response, url, headers, outputType) {
1022
- if (typeof response === "object" && "images" in response && Array.isArray(response.images) && response.images[0] && typeof response.images[0].image === "string") {
1023
- if (outputType === "url") {
1024
- return `data:image/jpeg;base64,${response.images[0].image}`;
1025
- }
1026
- return fetch(`data:image/jpeg;base64,${response.images[0].image}`).then((res) => res.blob());
1027
- }
1028
- throw new InferenceOutputError("Expected Hyperbolic text-to-image response format");
1029
- }
1030
- };
1031
-
1032
- // src/providers/nebius.ts
1033
- var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
1034
- var NebiusConversationalTask = class extends BaseConversationalTask {
1035
- constructor() {
1036
- super("nebius", NEBIUS_API_BASE_URL);
1037
- }
1038
- };
1039
- var NebiusTextGenerationTask = class extends BaseTextGenerationTask {
1040
- constructor() {
1041
- super("nebius", NEBIUS_API_BASE_URL);
1042
- }
1043
- };
1044
- var NebiusTextToImageTask = class extends TaskProviderHelper {
1045
- constructor() {
1046
- super("nebius", NEBIUS_API_BASE_URL);
1047
- }
1048
- preparePayload(params) {
1049
- return {
1050
- ...omit(params.args, ["inputs", "parameters"]),
1051
- ...params.args.parameters,
1052
- response_format: "b64_json",
1053
- prompt: params.args.inputs,
1054
- model: params.model
1055
- };
1056
- }
1057
- makeRoute() {
1058
- return "v1/images/generations";
1059
- }
1060
- async getResponse(response, url, headers, outputType) {
1061
- if (typeof response === "object" && "data" in response && Array.isArray(response.data) && response.data.length > 0 && "b64_json" in response.data[0] && typeof response.data[0].b64_json === "string") {
1062
- const base64Data = response.data[0].b64_json;
1063
- if (outputType === "url") {
1064
- return `data:image/jpeg;base64,${base64Data}`;
1065
- }
1066
- return fetch(`data:image/jpeg;base64,${base64Data}`).then((res) => res.blob());
1067
- }
1068
- throw new InferenceOutputError("Expected Nebius text-to-image response format");
1069
- }
1070
- };
1071
- var NebiusFeatureExtractionTask = class extends TaskProviderHelper {
1072
- constructor() {
1073
- super("nebius", NEBIUS_API_BASE_URL);
1074
- }
1075
- preparePayload(params) {
1076
- return {
1077
- input: params.args.inputs,
1078
- model: params.model
1079
- };
1080
- }
1081
- makeRoute() {
1082
- return "v1/embeddings";
1083
- }
1084
- async getResponse(response) {
1085
- return response.data.map((item) => item.embedding);
1086
- }
1087
- };
1088
-
1089
- // src/providers/novita.ts
1090
- var NOVITA_API_BASE_URL = "https://api.novita.ai";
1091
- var NovitaTextGenerationTask = class extends BaseTextGenerationTask {
1092
- constructor() {
1093
- super("novita", NOVITA_API_BASE_URL);
1094
- }
1095
- makeRoute() {
1096
- return "/v3/openai/chat/completions";
1097
- }
1098
- };
1099
- var NovitaConversationalTask = class extends BaseConversationalTask {
1100
- constructor() {
1101
- super("novita", NOVITA_API_BASE_URL);
1102
- }
1103
- makeRoute() {
1104
- return "/v3/openai/chat/completions";
1105
- }
1106
- };
1107
-
1108
- // src/providers/nscale.ts
1109
- var NSCALE_API_BASE_URL = "https://inference.api.nscale.com";
1110
- var NscaleConversationalTask = class extends BaseConversationalTask {
1111
- constructor() {
1112
- super("nscale", NSCALE_API_BASE_URL);
1113
- }
1114
- };
1115
- var NscaleTextToImageTask = class extends TaskProviderHelper {
1116
- constructor() {
1117
- super("nscale", NSCALE_API_BASE_URL);
1118
- }
1119
- preparePayload(params) {
1120
- return {
1121
- ...omit(params.args, ["inputs", "parameters"]),
1122
- ...params.args.parameters,
1123
- response_format: "b64_json",
1124
- prompt: params.args.inputs,
1125
- model: params.model
1126
- };
1127
- }
1128
- makeRoute() {
1129
- return "v1/images/generations";
1130
- }
1131
- async getResponse(response, url, headers, outputType) {
1132
- if (typeof response === "object" && "data" in response && Array.isArray(response.data) && response.data.length > 0 && "b64_json" in response.data[0] && typeof response.data[0].b64_json === "string") {
1133
- const base64Data = response.data[0].b64_json;
1134
- if (outputType === "url") {
1135
- return `data:image/jpeg;base64,${base64Data}`;
1136
- }
1137
- return fetch(`data:image/jpeg;base64,${base64Data}`).then((res) => res.blob());
1138
- }
1139
- throw new InferenceOutputError("Expected Nscale text-to-image response format");
1140
- }
1141
- };
1142
-
1143
- // src/providers/openai.ts
1144
- var OPENAI_API_BASE_URL = "https://api.openai.com";
1145
- var OpenAIConversationalTask = class extends BaseConversationalTask {
1146
- constructor() {
1147
- super("openai", OPENAI_API_BASE_URL, true);
1148
- }
1149
- };
1150
-
1151
- // src/providers/ovhcloud.ts
1152
- var OVHCLOUD_API_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net";
1153
- var OvhCloudConversationalTask = class extends BaseConversationalTask {
1154
- constructor() {
1155
- super("ovhcloud", OVHCLOUD_API_BASE_URL);
1156
- }
1157
- };
1158
- var OvhCloudTextGenerationTask = class extends BaseTextGenerationTask {
1159
- constructor() {
1160
- super("ovhcloud", OVHCLOUD_API_BASE_URL);
1161
- }
1162
- preparePayload(params) {
1163
- return {
1164
- model: params.model,
1165
- ...omit(params.args, ["inputs", "parameters"]),
1166
- ...params.args.parameters ? {
1167
- max_tokens: params.args.parameters.max_new_tokens,
1168
- ...omit(params.args.parameters, "max_new_tokens")
1169
- } : void 0,
1170
- prompt: params.args.inputs
1171
- };
1172
- }
1173
- async getResponse(response) {
1174
- if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
1175
- const completion = response.choices[0];
1176
- return {
1177
- generated_text: completion.text
1178
- };
1179
- }
1180
- throw new InferenceOutputError("Expected OVHcloud text generation response format");
1181
- }
1182
- };
1183
-
1184
- // src/providers/replicate.ts
1185
- var ReplicateTask = class extends TaskProviderHelper {
1186
- constructor(url) {
1187
- super("replicate", url || "https://api.replicate.com");
1188
- }
1189
- makeRoute(params) {
1190
- if (params.model.includes(":")) {
1191
- return "v1/predictions";
1192
- }
1193
- return `v1/models/${params.model}/predictions`;
1194
- }
1195
- preparePayload(params) {
1196
- return {
1197
- input: {
1198
- ...omit(params.args, ["inputs", "parameters"]),
1199
- ...params.args.parameters,
1200
- prompt: params.args.inputs
1201
- },
1202
- version: params.model.includes(":") ? params.model.split(":")[1] : void 0
1203
- };
1204
- }
1205
- prepareHeaders(params, binary) {
1206
- const headers = { Authorization: `Bearer ${params.accessToken}`, Prefer: "wait" };
1207
- if (!binary) {
1208
- headers["Content-Type"] = "application/json";
1209
- }
1210
- return headers;
1211
- }
1212
- makeUrl(params) {
1213
- const baseUrl = this.makeBaseUrl(params);
1214
- if (params.model.includes(":")) {
1215
- return `${baseUrl}/v1/predictions`;
1216
- }
1217
- return `${baseUrl}/v1/models/${params.model}/predictions`;
1218
- }
1219
- };
1220
- var ReplicateTextToImageTask = class extends ReplicateTask {
1221
- preparePayload(params) {
1222
- return {
1223
- input: {
1224
- ...omit(params.args, ["inputs", "parameters"]),
1225
- ...params.args.parameters,
1226
- prompt: params.args.inputs,
1227
- lora_weights: params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath ? `https://huggingface.co/${params.mapping.hfModelId}` : void 0
1228
- },
1229
- version: params.model.includes(":") ? params.model.split(":")[1] : void 0
1230
- };
1231
- }
1232
- async getResponse(res, url, headers, outputType) {
1233
- if (typeof res === "object" && "output" in res && Array.isArray(res.output) && res.output.length > 0 && typeof res.output[0] === "string") {
1234
- if (outputType === "url") {
1235
- return res.output[0];
1236
- }
1237
- const urlResponse = await fetch(res.output[0]);
1238
- return await urlResponse.blob();
1239
- }
1240
- throw new InferenceOutputError("Expected Replicate text-to-image response format");
1241
- }
1242
- };
1243
- var ReplicateTextToSpeechTask = class extends ReplicateTask {
1244
- preparePayload(params) {
1245
- const payload = super.preparePayload(params);
1246
- const input = payload["input"];
1247
- if (typeof input === "object" && input !== null && "prompt" in input) {
1248
- const inputObj = input;
1249
- inputObj["text"] = inputObj["prompt"];
1250
- delete inputObj["prompt"];
1251
- }
1252
- return payload;
1253
- }
1254
- async getResponse(response) {
1255
- if (response instanceof Blob) {
1256
- return response;
1257
- }
1258
- if (response && typeof response === "object") {
1259
- if ("output" in response) {
1260
- if (typeof response.output === "string") {
1261
- const urlResponse = await fetch(response.output);
1262
- return await urlResponse.blob();
1263
- } else if (Array.isArray(response.output)) {
1264
- const urlResponse = await fetch(response.output[0]);
1265
- return await urlResponse.blob();
1266
- }
1267
- }
1268
- }
1269
- throw new InferenceOutputError("Expected Blob or object with output");
1270
- }
1271
- };
1272
- var ReplicateTextToVideoTask = class extends ReplicateTask {
1273
- async getResponse(response) {
1274
- if (typeof response === "object" && !!response && "output" in response && typeof response.output === "string" && isUrl(response.output)) {
1275
- const urlResponse = await fetch(response.output);
1276
- return await urlResponse.blob();
1277
- }
1278
- throw new InferenceOutputError("Expected { output: string }");
1279
- }
1280
- };
1281
-
1282
- // src/providers/sambanova.ts
1283
- var SambanovaConversationalTask = class extends BaseConversationalTask {
1284
- constructor() {
1285
- super("sambanova", "https://api.sambanova.ai");
1286
- }
1287
- };
1288
- var SambanovaFeatureExtractionTask = class extends TaskProviderHelper {
1289
- constructor() {
1290
- super("sambanova", "https://api.sambanova.ai");
1291
- }
1292
- makeRoute() {
1293
- return `/v1/embeddings`;
1294
- }
1295
- async getResponse(response) {
1296
- if (typeof response === "object" && "data" in response && Array.isArray(response.data)) {
1297
- return response.data.map((item) => item.embedding);
1298
- }
1299
- throw new InferenceOutputError(
1300
- "Expected Sambanova feature-extraction (embeddings) response format to be {'data' : list of {'embedding' : number[]}}"
1301
- );
1302
- }
1303
- preparePayload(params) {
1304
- return {
1305
- model: params.model,
1306
- input: params.args.inputs,
1307
- ...params.args
1308
- };
1309
- }
1310
- };
1311
-
1312
- // src/providers/together.ts
1313
- var TOGETHER_API_BASE_URL = "https://api.together.xyz";
1314
- var TogetherConversationalTask = class extends BaseConversationalTask {
1315
- constructor() {
1316
- super("together", TOGETHER_API_BASE_URL);
1317
- }
1318
- };
1319
- var TogetherTextGenerationTask = class extends BaseTextGenerationTask {
1320
- constructor() {
1321
- super("together", TOGETHER_API_BASE_URL);
1322
- }
1323
- preparePayload(params) {
1324
- return {
1325
- model: params.model,
1326
- ...params.args,
1327
- prompt: params.args.inputs
1328
- };
1329
- }
1330
- async getResponse(response) {
1331
- if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
1332
- const completion = response.choices[0];
1333
- return {
1334
- generated_text: completion.text
1335
- };
1336
- }
1337
- throw new InferenceOutputError("Expected Together text generation response format");
1338
- }
1339
- };
1340
- var TogetherTextToImageTask = class extends TaskProviderHelper {
1341
- constructor() {
1342
- super("together", TOGETHER_API_BASE_URL);
1343
- }
1344
- makeRoute() {
1345
- return "v1/images/generations";
1346
- }
1347
- preparePayload(params) {
1348
- return {
1349
- ...omit(params.args, ["inputs", "parameters"]),
1350
- ...params.args.parameters,
1351
- prompt: params.args.inputs,
1352
- response_format: "base64",
1353
- model: params.model
1354
- };
1355
- }
1356
- async getResponse(response, outputType) {
1357
- if (typeof response === "object" && "data" in response && Array.isArray(response.data) && response.data.length > 0 && "b64_json" in response.data[0] && typeof response.data[0].b64_json === "string") {
1358
- const base64Data = response.data[0].b64_json;
1359
- if (outputType === "url") {
1360
- return `data:image/jpeg;base64,${base64Data}`;
1361
- }
1362
- return fetch(`data:image/jpeg;base64,${base64Data}`).then((res) => res.blob());
1363
- }
1364
- throw new InferenceOutputError("Expected Together text-to-image response format");
1365
- }
1366
- };
1367
-
1368
- // src/lib/getProviderHelper.ts
1369
- var PROVIDERS = {
1370
- "black-forest-labs": {
1371
- "text-to-image": new BlackForestLabsTextToImageTask()
1372
- },
1373
- cerebras: {
1374
- conversational: new CerebrasConversationalTask()
1375
- },
1376
- cohere: {
1377
- conversational: new CohereConversationalTask()
1378
- },
1379
- "fal-ai": {
1380
- "text-to-image": new FalAITextToImageTask(),
1381
- "text-to-speech": new FalAITextToSpeechTask(),
1382
- "text-to-video": new FalAITextToVideoTask(),
1383
- "automatic-speech-recognition": new FalAIAutomaticSpeechRecognitionTask()
1384
- },
1385
- "featherless-ai": {
1386
- conversational: new FeatherlessAIConversationalTask(),
1387
- "text-generation": new FeatherlessAITextGenerationTask()
1388
- },
1389
- "hf-inference": {
1390
- "text-to-image": new HFInferenceTextToImageTask(),
1391
- conversational: new HFInferenceConversationalTask(),
1392
- "text-generation": new HFInferenceTextGenerationTask(),
1393
- "text-classification": new HFInferenceTextClassificationTask(),
1394
- "question-answering": new HFInferenceQuestionAnsweringTask(),
1395
- "audio-classification": new HFInferenceAudioClassificationTask(),
1396
- "automatic-speech-recognition": new HFInferenceAutomaticSpeechRecognitionTask(),
1397
- "fill-mask": new HFInferenceFillMaskTask(),
1398
- "feature-extraction": new HFInferenceFeatureExtractionTask(),
1399
- "image-classification": new HFInferenceImageClassificationTask(),
1400
- "image-segmentation": new HFInferenceImageSegmentationTask(),
1401
- "document-question-answering": new HFInferenceDocumentQuestionAnsweringTask(),
1402
- "image-to-text": new HFInferenceImageToTextTask(),
1403
- "object-detection": new HFInferenceObjectDetectionTask(),
1404
- "audio-to-audio": new HFInferenceAudioToAudioTask(),
1405
- "zero-shot-image-classification": new HFInferenceZeroShotImageClassificationTask(),
1406
- "zero-shot-classification": new HFInferenceZeroShotClassificationTask(),
1407
- "image-to-image": new HFInferenceImageToImageTask(),
1408
- "sentence-similarity": new HFInferenceSentenceSimilarityTask(),
1409
- "table-question-answering": new HFInferenceTableQuestionAnsweringTask(),
1410
- "tabular-classification": new HFInferenceTabularClassificationTask(),
1411
- "text-to-speech": new HFInferenceTextToSpeechTask(),
1412
- "token-classification": new HFInferenceTokenClassificationTask(),
1413
- translation: new HFInferenceTranslationTask(),
1414
- summarization: new HFInferenceSummarizationTask(),
1415
- "visual-question-answering": new HFInferenceVisualQuestionAnsweringTask(),
1416
- "tabular-regression": new HFInferenceTabularRegressionTask(),
1417
- "text-to-audio": new HFInferenceTextToAudioTask()
1418
- },
1419
- "fireworks-ai": {
1420
- conversational: new FireworksConversationalTask()
1421
- },
1422
- groq: {
1423
- conversational: new GroqConversationalTask(),
1424
- "text-generation": new GroqTextGenerationTask()
1425
- },
1426
- hyperbolic: {
1427
- "text-to-image": new HyperbolicTextToImageTask(),
1428
- conversational: new HyperbolicConversationalTask(),
1429
- "text-generation": new HyperbolicTextGenerationTask()
1430
- },
1431
- nebius: {
1432
- "text-to-image": new NebiusTextToImageTask(),
1433
- conversational: new NebiusConversationalTask(),
1434
- "text-generation": new NebiusTextGenerationTask(),
1435
- "feature-extraction": new NebiusFeatureExtractionTask()
1436
- },
1437
- novita: {
1438
- conversational: new NovitaConversationalTask(),
1439
- "text-generation": new NovitaTextGenerationTask()
1440
- },
1441
- nscale: {
1442
- "text-to-image": new NscaleTextToImageTask(),
1443
- conversational: new NscaleConversationalTask()
1444
- },
1445
- openai: {
1446
- conversational: new OpenAIConversationalTask()
1447
- },
1448
- ovhcloud: {
1449
- conversational: new OvhCloudConversationalTask(),
1450
- "text-generation": new OvhCloudTextGenerationTask()
1451
- },
1452
- replicate: {
1453
- "text-to-image": new ReplicateTextToImageTask(),
1454
- "text-to-speech": new ReplicateTextToSpeechTask(),
1455
- "text-to-video": new ReplicateTextToVideoTask()
1456
- },
1457
- sambanova: {
1458
- conversational: new SambanovaConversationalTask(),
1459
- "feature-extraction": new SambanovaFeatureExtractionTask()
1460
- },
1461
- together: {
1462
- "text-to-image": new TogetherTextToImageTask(),
1463
- conversational: new TogetherConversationalTask(),
1464
- "text-generation": new TogetherTextGenerationTask()
1465
- }
1466
- };
1467
- function getProviderHelper(provider, task) {
1468
- if (provider === "hf-inference" && !task || provider === "auto") {
1469
- return new HFInferenceTask();
1470
- }
1471
- if (!task) {
1472
- throw new Error("you need to provide a task name when using an external provider, e.g. 'text-to-image'");
1473
- }
1474
- if (!(provider in PROVIDERS)) {
1475
- throw new Error(`Provider '${provider}' not supported. Available providers: ${Object.keys(PROVIDERS)}`);
1476
- }
1477
- const providerTasks = PROVIDERS[provider];
1478
- if (!providerTasks || !(task in providerTasks)) {
1479
- throw new Error(
1480
- `Task '${task}' not supported for provider '${provider}'. Available tasks: ${Object.keys(providerTasks ?? {})}`
1481
- );
1482
- }
1483
- return providerTasks[task];
1484
- }
1485
-
1486
- // package.json
1487
- var name = "@huggingface/inference";
1488
- var version = "3.13.2";
1489
-
1490
// src/lib/makeRequestOptions.ts
// Memoized Hub task metadata; populated lazily by loadDefaultModel.
var tasks = null;
/**
 * Resolve which model id the selected provider should receive, then delegate
 * to makeRequestOptionsFromResolvedModel to build the final fetch URL/options.
 *
 * Resolution rules (in order):
 *  - endpointUrl is only valid with the "hf-inference" provider;
 *  - raw model URLs are rejected (endpointUrl replaced them);
 *  - with an endpointUrl, no provider mapping lookup is performed;
 *  - without a model, a default model is loaded for the given task;
 *  - clientSideRoutingOnly providers derive the provider model id from the
 *    "<provider>/<model>" prefix locally, others ask the Hub for the mapping.
 */
async function makeRequestOptions(args, providerHelper, options) {
  const { model: maybeModel } = args;
  const provider = providerHelper.provider;
  const { task } = options ?? {};
  if (args.endpointUrl && provider !== "hf-inference") {
    throw new Error(`Cannot use endpointUrl with a third-party provider.`);
  }
  if (maybeModel && isUrl(maybeModel)) {
    throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
  }
  if (args.endpointUrl) {
    // Dedicated endpoint: skip provider-mapping resolution entirely.
    return makeRequestOptionsFromResolvedModel(
      maybeModel ?? args.endpointUrl,
      providerHelper,
      args,
      void 0,
      options
    );
  }
  if (!maybeModel && !task) {
    throw new Error("No model provided, and no task has been specified.");
  }
  const hfModel = maybeModel ?? await loadDefaultModel(task);
  if (providerHelper.clientSideRoutingOnly && !maybeModel) {
    throw new Error(`Provider ${provider} requires a model ID to be passed directly.`);
  }
  const inferenceProviderMapping = providerHelper.clientSideRoutingOnly ? {
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    providerId: removeProviderPrefix(maybeModel, provider),
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    hfModelId: maybeModel,
    status: "live",
    // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
    task
  } : await getInferenceProviderMapping(
    {
      modelId: hfModel,
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      task,
      provider,
      accessToken: args.accessToken
    },
    { fetch: options?.fetch }
  );
  if (!inferenceProviderMapping) {
    throw new Error(`We have not been able to find inference provider information for model ${hfModel}.`);
  }
  return makeRequestOptionsFromResolvedModel(
    inferenceProviderMapping.providerId,
    providerHelper,
    args,
    inferenceProviderMapping,
    options
  );
}
1547
/**
 * Build the final { url, info } pair for fetch, once the provider-side model
 * id is known.
 *
 * Auth method selection:
 *  - clientSideRoutingOnly providers refuse "hf_" tokens and always use the
 *    caller's own provider key;
 *  - otherwise an "hf_"-prefixed token routes through HF, any other token is
 *    treated as a provider key;
 *  - with no token, includeCredentials === "include" opts into cookie auth.
 */
function makeRequestOptionsFromResolvedModel(resolvedModel, providerHelper, args, mapping, options) {
  const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
  const provider = providerHelper.provider;
  const { includeCredentials, task, signal, billTo } = options ?? {};
  const authMethod = (() => {
    if (providerHelper.clientSideRoutingOnly) {
      if (accessToken && accessToken.startsWith("hf_")) {
        throw new Error(`Provider ${provider} is closed-source and does not support HF tokens.`);
      }
      return "provider-key";
    }
    if (accessToken) {
      return accessToken.startsWith("hf_") ? "hf-token" : "provider-key";
    }
    if (includeCredentials === "include") {
      return "credentials-include";
    }
    return "none";
  })();
  // An explicit endpointUrl always wins over the resolved model id for the URL.
  const modelId = endpointUrl ?? resolvedModel;
  const url = providerHelper.makeUrl({
    authMethod,
    model: modelId,
    task
  });
  // Second argument flags a binary payload ("data" in args), which changes
  // the Content-Type handling inside prepareHeaders.
  const headers = providerHelper.prepareHeaders(
    {
      accessToken,
      authMethod
    },
    "data" in args && !!args.data
  );
  if (billTo) {
    headers[HF_HEADER_X_BILL_TO] = billTo;
  }
  // Advertise the library name/version; in browsers, append the native UA.
  const ownUserAgent = `${name}/${version}`;
  const userAgent = [ownUserAgent, typeof navigator !== "undefined" ? navigator.userAgent : void 0].filter((x) => x !== void 0).join(" ");
  headers["User-Agent"] = userAgent;
  const body = providerHelper.makeBody({
    args: remainingArgs,
    model: resolvedModel,
    task,
    mapping
  });
  // includeCredentials: strings pass through verbatim, `true` maps to "include".
  let credentials;
  if (typeof includeCredentials === "string") {
    credentials = includeCredentials;
  } else if (includeCredentials === true) {
    credentials = "include";
  }
  const info = {
    headers,
    method: "POST",
    body,
    ...credentials ? { credentials } : void 0,
    signal
  };
  return { url, info };
}
1606
/**
 * Return the id of the Hub's default model for `task`.
 * Task metadata is fetched once and memoized in the module-level `tasks` cache.
 * Throws when the task has no default model defined.
 */
async function loadDefaultModel(task) {
  if (!tasks) {
    tasks = await loadTaskInfo();
  }
  const taskInfo = tasks[task];
  // Fix: also optional-chain `models` — a task entry without a `models` array
  // previously raised a TypeError here instead of the explicit error below.
  if ((taskInfo?.models?.length ?? 0) <= 0) {
    throw new Error(`No default model defined for task ${task}, please define the model explicitly.`);
  }
  return taskInfo.models[0].id;
}
1616
/**
 * Fetch the canonical task metadata (including default models per task)
 * from the Hugging Face Hub API.
 */
async function loadTaskInfo() {
  const response = await fetch(`${HF_HUB_URL}/api/tasks`);
  if (response.ok) {
    return await response.json();
  }
  throw new Error("Failed to load tasks definitions from Hugging Face Hub.");
}
1623
/**
 * Strip the leading "<provider>/" from a model id; throws when the id is not
 * prefixed with the given provider.
 */
function removeProviderPrefix(model, provider) {
  const expectedPrefix = `${provider}/`;
  if (!model.startsWith(expectedPrefix)) {
    throw new Error(`Models from ${provider} must be prefixed by "${provider}/". Got "${model}".`);
  }
  return model.slice(expectedPrefix.length);
}
1629
-
1630
- // src/vendor/fetch-event-source/parse.ts
1631
// Incremental line splitter for an SSE byte stream. Buffers partial chunks
// across calls and invokes `onLine(lineBytes, fieldLength)` once per complete
// line; `fieldLength` is the offset of the first ":" in the line (-1 if none).
function getLines(onLine) {
  let buffer;
  let position;
  let fieldLength;
  let discardTrailingNewline = false;
  return function onChunk(arr) {
    if (buffer === void 0) {
      buffer = arr;
      position = 0;
      fieldLength = -1;
    } else {
      // A line was split across chunks: append the new bytes to the tail.
      buffer = concat(buffer, arr);
    }
    const bufLength = buffer.length;
    let lineStart = 0;
    while (position < bufLength) {
      if (discardTrailingNewline) {
        // Previous line ended in \r; swallow a following \n (CRLF pair).
        if (buffer[position] === 10 /* NewLine */) {
          lineStart = ++position;
        }
        discardTrailingNewline = false;
      }
      let lineEnd = -1;
      for (; position < bufLength && lineEnd === -1; ++position) {
        switch (buffer[position]) {
          case 58 /* Colon */:
            // Only the first colon delimits the field name.
            if (fieldLength === -1) {
              fieldLength = position - lineStart;
            }
            break;
          case 13 /* CarriageReturn */:
            discardTrailingNewline = true;
          // intentional fallthrough: \r terminates the line like \n does
          case 10 /* NewLine */:
            lineEnd = position;
            break;
        }
      }
      if (lineEnd === -1) {
        // Incomplete line: wait for the next chunk.
        break;
      }
      onLine(buffer.subarray(lineStart, lineEnd), fieldLength);
      lineStart = position;
      fieldLength = -1;
    }
    if (lineStart === bufLength) {
      // Everything consumed: release the buffer.
      buffer = void 0;
    } else if (lineStart !== 0) {
      // Drop the consumed prefix, keep the partial tail for the next chunk.
      buffer = buffer.subarray(lineStart);
      position -= lineStart;
    }
  };
}
1683
// Folds lines produced by getLines into SSE messages, handling the standard
// `data` / `event` / `id` / `retry` fields. A blank line completes a message.
function getMessages(onId, onRetry, onMessage) {
  let message = newMessage();
  const decoder = new TextDecoder();
  return function onLine(line, fieldLength) {
    if (line.length === 0) {
      // Empty line terminates the current message.
      onMessage?.(message);
      message = newMessage();
    } else if (fieldLength > 0) {
      const field = decoder.decode(line.subarray(0, fieldLength));
      // Per the SSE spec, a single space after ":" belongs to the separator.
      const valueOffset = fieldLength + (line[fieldLength + 1] === 32 /* Space */ ? 2 : 1);
      const value = decoder.decode(line.subarray(valueOffset));
      switch (field) {
        case "data":
          // Multiple data lines within one message are joined with "\n".
          message.data = message.data ? message.data + "\n" + value : value;
          break;
        case "event":
          message.event = value;
          break;
        case "id":
          onId(message.id = value);
          break;
        case "retry":
          const retry = parseInt(value, 10);
          if (!isNaN(retry)) {
            onRetry(message.retry = retry);
          }
          break;
      }
    }
  };
}
1714
// Join two byte arrays into one freshly-allocated Uint8Array (a first, then b).
function concat(a, b) {
  const joined = new Uint8Array(a.length + b.length);
  joined.set(a, 0);
  joined.set(b, a.length);
  return joined;
}
1720
// Fresh, empty SSE message accumulator (data/event/id empty, retry unset).
function newMessage() {
  return { data: "", event: "", id: "", retry: void 0 };
}
1728
-
1729
- // src/utils/request.ts
1730
/**
 * Perform a non-streaming inference request.
 * Returns { data, requestContext } where data is parsed JSON for JSON
 * responses and a Blob otherwise.
 * Throws with the server's error payload on non-OK responses.
 */
async function innerRequest(args, providerHelper, options) {
  const { url, info } = await makeRequestOptions(args, providerHelper, options);
  const response = await (options?.fetch ?? fetch)(url, info);
  const requestContext = { url, info };
  // NOTE(review): retries recursively with no cap or backoff while the server
  // keeps returning 503 — confirm this unbounded retry is intended.
  if (options?.retry_on_error !== false && response.status === 503) {
    return innerRequest(args, providerHelper, options);
  }
  if (!response.ok) {
    const contentType = response.headers.get("Content-Type");
    if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
      const output = await response.json();
      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
        throw new Error(
          `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
        );
      }
      if (output.error || output.detail) {
        throw new Error(JSON.stringify(output.error ?? output.detail));
      } else {
        // Fix: `output` is a parsed JSON value, not a string — passing an
        // object to `new Error()` produced the useless message
        // "[object Object]". Serialize non-string payloads instead.
        throw new Error(typeof output === "string" ? output : JSON.stringify(output));
      }
    }
    const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
    throw new Error(message ?? "An error occurred while fetching the blob");
  }
  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
    const data = await response.json();
    return { data, requestContext };
  }
  const blob = await response.blob();
  return { data: blob, requestContext };
}
1762
/**
 * Perform a streaming (SSE) inference request, yielding one parsed JSON
 * payload per server event until the stream closes or "[DONE]" is received.
 * Throws on HTTP errors, on non-event-stream responses, and on error payloads
 * forwarded inside the stream.
 */
async function* innerStreamingRequest(args, providerHelper, options) {
  // Force `stream: true` in the request body regardless of caller args.
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, providerHelper, options);
  const response = await (options?.fetch ?? fetch)(url, info);
  // NOTE(review): like innerRequest, this retries 503s recursively with no
  // cap — confirm intended.
  if (options?.retry_on_error !== false && response.status === 503) {
    return yield* innerStreamingRequest(args, providerHelper, options);
  }
  if (!response.ok) {
    if (response.headers.get("Content-Type")?.startsWith("application/json")) {
      const output = await response.json();
      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
        throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`);
      }
      // Error payloads come in several shapes; try them from most to least specific.
      if (typeof output.error === "string") {
        throw new Error(output.error);
      }
      if (output.error && "message" in output.error && typeof output.error.message === "string") {
        throw new Error(output.error.message);
      }
      if (typeof output.message === "string") {
        throw new Error(output.message);
      }
    }
    throw new Error(`Server response contains error: ${response.status}`);
  }
  if (!response.headers.get("content-type")?.startsWith("text/event-stream")) {
    throw new Error(
      `Server does not support event stream content type, it returned ` + response.headers.get("content-type")
    );
  }
  if (!response.body) {
    return;
  }
  const reader = response.body.getReader();
  // Parsed SSE events accumulated per chunk, drained after each read.
  let events = [];
  const onEvent = (event) => {
    events.push(event);
  };
  // getLines splits raw bytes into lines; getMessages assembles SSE messages.
  // The id/retry callbacks are intentionally no-ops here.
  const onChunk = getLines(
    getMessages(
      () => {
      },
      () => {
      },
      onEvent
    )
  );
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        return;
      }
      onChunk(value);
      for (const event of events) {
        if (event.data.length > 0) {
          // "[DONE]" is the OpenAI-style end-of-stream sentinel.
          if (event.data === "[DONE]") {
            return;
          }
          const data = JSON.parse(event.data);
          // Errors can also be forwarded mid-stream as a regular event.
          if (typeof data === "object" && data !== null && "error" in data) {
            const errorStr = typeof data.error === "string" ? data.error : typeof data.error === "object" && data.error && "message" in data.error && typeof data.error.message === "string" ? data.error.message : JSON.stringify(data.error);
            throw new Error(`Error forwarded from backend: ` + errorStr);
          }
          yield data;
        }
      }
      events = [];
    }
  } finally {
    // Always release the reader, even when the generator is abandoned early.
    reader.releaseLock();
  }
}
1834
-
1835
- // src/tasks/custom/request.ts
1836
// Deprecated generic request escape hatch, kept for backward compatibility.
async function request(args, options) {
  console.warn(
    "The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
  );
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, options?.task);
  const { data } = await innerRequest(args, helper, options);
  return data;
}
1845
-
1846
- // src/tasks/custom/streamingRequest.ts
1847
// Deprecated generic streaming escape hatch, kept for backward compatibility.
async function* streamingRequest(args, options) {
  console.warn(
    "The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
  );
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, options?.task);
  yield* innerStreamingRequest(args, helper, options);
}
1855
-
1856
- // src/tasks/audio/utils.ts
1857
// Normalize an audio-task payload: payloads that already carry `data` pass
// through untouched, otherwise `inputs` is relocated into the `data` field.
function preparePayload(args) {
  if ("data" in args) {
    return args;
  }
  return { ...omit(args, "inputs"), data: args.inputs };
}
1863
-
1864
- // src/tasks/audio/audioClassification.ts
1865
// Classify an audio input using the resolved inference provider.
async function audioClassification(args, options) {
  const task = "audio-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload(args), helper, { ...options, task });
  return helper.getResponse(result.data);
}
1875
-
1876
- // src/tasks/audio/audioToAudio.ts
1877
// Audio-to-audio transformation (e.g. speech enhancement, source separation).
async function audioToAudio(args, options) {
  const task = "audio-to-audio";
  // Only the `inputs`-shaped payload variant carries a model id here.
  const model = "inputs" in args ? args.model : void 0;
  const resolvedProvider = await resolveProvider(args.provider, model);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload(args), helper, { ...options, task });
  return helper.getResponse(result.data);
}
1888
-
1889
- // src/tasks/audio/automaticSpeechRecognition.ts
1890
// Transcribe audio to text via the resolved inference provider.
async function automaticSpeechRecognition(args, options) {
  const task = "automatic-speech-recognition";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const payload = await helper.preparePayloadAsync(args);
  const result = await innerRequest(payload, helper, { ...options, task });
  // The ASR contract requires a string `text` field on the raw response.
  if (typeof result.data?.text !== "string") {
    throw new InferenceOutputError("Expected {text: string}");
  }
  return helper.getResponse(result.data);
}
1904
-
1905
- // src/tasks/audio/textToSpeech.ts
1906
// Synthesize speech from text; args are forwarded without payload rewriting.
async function textToSpeech(args, options) {
  const task = "text-to-speech";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
1915
-
1916
- // src/tasks/cv/utils.ts
1917
// Normalize a CV-task payload: keep `data` payloads as-is, otherwise move
// `inputs` into the `data` field.
function preparePayload2(args) {
  if ("data" in args) {
    return args;
  }
  return { ...omit(args, "inputs"), data: args.inputs };
}
1920
-
1921
- // src/tasks/cv/imageClassification.ts
1922
// Classify an image using the resolved inference provider.
async function imageClassification(args, options) {
  const task = "image-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload2(args), helper, { ...options, task });
  return helper.getResponse(result.data);
}
1932
-
1933
- // src/tasks/cv/imageSegmentation.ts
1934
// Segment an image into labeled regions using the resolved provider.
async function imageSegmentation(args, options) {
  const task = "image-segmentation";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload2(args), helper, { ...options, task });
  return helper.getResponse(result.data);
}
1944
-
1945
- // src/tasks/cv/imageToImage.ts
1946
// Transform an input image into another image (provider-specific payload prep).
async function imageToImage(args, options) {
  const task = "image-to-image";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const payload = await helper.preparePayloadAsync(args);
  const result = await innerRequest(payload, helper, { ...options, task });
  return helper.getResponse(result.data);
}
1956
-
1957
- // src/tasks/cv/imageToText.ts
1958
// Caption an image using the resolved inference provider.
async function imageToText(args, options) {
  const task = "image-to-text";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload2(args), helper, { ...options, task });
  // The server answers with an array; only the first caption is surfaced.
  return helper.getResponse(result.data[0]);
}
1968
-
1969
- // src/tasks/cv/objectDetection.ts
1970
// Detect objects (bounding boxes + labels) in an image.
async function objectDetection(args, options) {
  const task = "object-detection";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(preparePayload2(args), helper, { ...options, task });
  return helper.getResponse(result.data);
}
1980
-
1981
- // src/tasks/cv/textToImage.ts
1982
// Generate an image from a text prompt.
async function textToImage(args, options) {
  const task = "text-to-image";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  // Re-derive the request URL/headers so getResponse can perform any
  // provider-specific follow-up (e.g. fetching the generated artifact).
  const { url, info } = await makeRequestOptions(args, helper, { ...options, task });
  return helper.getResponse(result.data, url, info.headers, options?.outputType);
}
1992
-
1993
- // src/tasks/cv/textToVideo.ts
1994
// Generate a video from a text prompt.
async function textToVideo(args, options) {
  const task = "text-to-video";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  // Re-derive the request URL/headers for provider-specific post-processing.
  const { url, info } = await makeRequestOptions(args, helper, { ...options, task });
  return helper.getResponse(result.data, url, info.headers);
}
2008
-
2009
- // src/tasks/cv/zeroShotImageClassification.ts
2010
// Build the zero-shot-image-classification payload: the image (Blob or
// ArrayBuffer) is encoded as base64 under `inputs.image`.
async function preparePayload3(args) {
  if (args.inputs instanceof Blob) {
    const bytes = new Uint8Array(await args.inputs.arrayBuffer());
    return { ...args, inputs: { image: base64FromBytes(bytes) } };
  }
  const raw = args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer();
  return { ...args, inputs: { image: base64FromBytes(new Uint8Array(raw)) } };
}
2031
// Classify an image against caller-provided candidate labels.
async function zeroShotImageClassification(args, options) {
  const task = "zero-shot-image-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const payload = await preparePayload3(args);
  const result = await innerRequest(payload, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2041
-
2042
- // src/tasks/nlp/chatCompletion.ts
2043
// Non-streaming chat completion ("conversational" task).
async function chatCompletion(args, options) {
  const task = "conversational";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2052
-
2053
- // src/tasks/nlp/chatCompletionStream.ts
2054
// Streaming chat completion: yields one chunk per server-sent event.
async function* chatCompletionStream(args, options) {
  const task = "conversational";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  yield* innerStreamingRequest(args, helper, { ...options, task });
}
2062
-
2063
- // src/tasks/nlp/featureExtraction.ts
2064
// Compute embeddings / feature vectors for the given inputs.
async function featureExtraction(args, options) {
  const task = "feature-extraction";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2073
-
2074
- // src/tasks/nlp/fillMask.ts
2075
// Fill masked token(s) in a text input.
async function fillMask(args, options) {
  const task = "fill-mask";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2084
-
2085
- // src/tasks/nlp/questionAnswering.ts
2086
// Extractive question answering over a provided context.
async function questionAnswering(args, options) {
  const task = "question-answering";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2099
-
2100
- // src/tasks/nlp/sentenceSimilarity.ts
2101
// Score similarity between a source sentence and candidate sentences.
async function sentenceSimilarity(args, options) {
  const task = "sentence-similarity";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2110
-
2111
- // src/tasks/nlp/summarization.ts
2112
// Summarize a text input.
async function summarization(args, options) {
  const task = "summarization";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2121
-
2122
- // src/tasks/nlp/tableQuestionAnswering.ts
2123
// Answer a natural-language question over tabular data.
async function tableQuestionAnswering(args, options) {
  const task = "table-question-answering";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2136
-
2137
- // src/tasks/nlp/textClassification.ts
2138
// Classify a text input (e.g. sentiment).
async function textClassification(args, options) {
  const task = "text-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2147
-
2148
- // src/tasks/nlp/textGeneration.ts
2149
// Non-streaming text generation.
async function textGeneration(args, options) {
  const task = "text-generation";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2158
-
2159
- // src/tasks/nlp/textGenerationStream.ts
2160
// Streaming text generation: yields one chunk per server-sent event.
async function* textGenerationStream(args, options) {
  const task = "text-generation";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  yield* innerStreamingRequest(args, helper, { ...options, task });
}
2168
-
2169
- // src/tasks/nlp/tokenClassification.ts
2170
// Token-level classification (e.g. NER).
async function tokenClassification(args, options) {
  const task = "token-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2183
-
2184
- // src/tasks/nlp/translation.ts
2185
// Translate a text input.
async function translation(args, options) {
  const task = "translation";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2194
-
2195
- // src/tasks/nlp/zeroShotClassification.ts
2196
// Classify text against caller-provided candidate labels.
async function zeroShotClassification(args, options) {
  const task = "zero-shot-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2209
-
2210
- // src/tasks/multimodal/documentQuestionAnswering.ts
2211
// Answer a question about a document image.
async function documentQuestionAnswering(args, options) {
  const task = "document-question-answering";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  // The wire format expects the document image as base64 next to the question.
  const imageBytes = new Uint8Array(await args.inputs.image.arrayBuffer());
  const reqArgs = {
    ...args,
    inputs: {
      question: args.inputs.question,
      image: base64FromBytes(imageBytes)
    }
  };
  const result = await innerRequest(reqArgs, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2232
-
2233
- // src/tasks/multimodal/visualQuestionAnswering.ts
2234
// Answer a question about a natural image.
async function visualQuestionAnswering(args, options) {
  const task = "visual-question-answering";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  // The wire format expects the image as base64 next to the question.
  const imageBytes = new Uint8Array(await args.inputs.image.arrayBuffer());
  const reqArgs = {
    ...args,
    inputs: {
      question: args.inputs.question,
      image: base64FromBytes(imageBytes)
    }
  };
  const result = await innerRequest(reqArgs, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2251
-
2252
- // src/tasks/tabular/tabularClassification.ts
2253
// Classification over tabular rows.
async function tabularClassification(args, options) {
  const task = "tabular-classification";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2262
-
2263
- // src/tasks/tabular/tabularRegression.ts
2264
// Regression over tabular rows.
async function tabularRegression(args, options) {
  const task = "tabular-regression";
  const resolvedProvider = await resolveProvider(args.provider, args.model, args.endpointUrl);
  const helper = getProviderHelper(resolvedProvider, task);
  const result = await innerRequest(args, helper, { ...options, task });
  return helper.getResponse(result.data);
}
2273
-
2274
- // src/utils/typedEntries.ts
2275
// Runtime passthrough for Object.entries; the original TS source only
// narrows the static type of the result.
function typedEntries(record) {
  return Object.entries(record);
}
2278
-
2279
- // src/InferenceClient.ts
2280
// src/InferenceClient.ts
// Thin client that exposes every task function from tasks_exports as an
// instance method, pre-bound to this client's accessToken and defaultOptions.
var InferenceClient = class {
  accessToken;
  defaultOptions;
  constructor(accessToken = "", defaultOptions = {}) {
    this.accessToken = accessToken;
    this.defaultOptions = defaultOptions;
    // Install one non-enumerable method per exported task function.
    // Per-call params/options override the client-level defaults;
    // endpointUrl travels in args while the other defaults travel in options.
    for (const [name2, fn] of typedEntries(tasks_exports)) {
      Object.defineProperty(this, name2, {
        enumerable: false,
        value: (params, options) => (
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          fn(
            /// ^ The cast of fn to any is necessary, otherwise TS can't compile because the generated union type is too complex
            { endpointUrl: defaultOptions.endpointUrl, accessToken, ...params },
            {
              ...omit(defaultOptions, ["endpointUrl"]),
              ...options
            }
          )
        )
      });
    }
  }
  /**
   * Returns a new instance of InferenceClient tied to a specified endpoint.
   *
   * For backward compatibility mostly.
   */
  endpoint(endpointUrl) {
    return new InferenceClient(this.accessToken, { ...this.defaultOptions, endpointUrl });
  }
};
2312
// Backward-compatibility aliases: plain subclasses of InferenceClient kept so
// existing imports of `HfInference` and `InferenceClientEndpoint` keep working.
var HfInference = class extends InferenceClient {
};
var InferenceClientEndpoint = class extends InferenceClient {
};
2316
-
2317
- // src/types.ts
2318
// Closed list of inference providers this library version knows how to route to.
var INFERENCE_PROVIDERS = [
  "black-forest-labs",
  "cerebras",
  "cohere",
  "fal-ai",
  "featherless-ai",
  "fireworks-ai",
  "groq",
  "hf-inference",
  "hyperbolic",
  "nebius",
  "novita",
  "nscale",
  "openai",
  "ovhcloud",
  "replicate",
  "sambanova",
  "together"
];
// "auto" is a policy, not a provider: it lets the library pick one on the caller's behalf.
var PROVIDERS_OR_POLICIES = [...INFERENCE_PROVIDERS, "auto"];
2338
-
2339
- // src/snippets/index.ts
2340
// Namespace object for the snippets API; __export installs getters so the
// binding stays live (standard esbuild module-namespace pattern).
var snippets_exports = {};
__export(snippets_exports, {
  getInferenceSnippets: () => getInferenceSnippets
});
2344
-
2345
- // src/snippets/getInferenceSnippets.ts
2346
- import { Template } from "@huggingface/jinja";
2347
- import {
2348
- getModelInputSnippet,
2349
- inferenceSnippetLanguages
2350
- } from "@huggingface/tasks";
2351
-
2352
- // src/snippets/templates.exported.ts
2353
- var templates = {
2354
- "js": {
2355
- "fetch": {
2356
- "basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
2357
- "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
2358
- "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
2359
- "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
2360
- "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
2361
- "textToSpeech": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
2362
- "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}",\n{% endif %} },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
2363
- },
2364
- "huggingface.js": {
2365
- "basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
2366
- "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
2367
- "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
2368
- "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);',
2369
- "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n }\n}',
2370
- "textToImage": `import { InferenceClient } from "@huggingface/inference";
2371
-
2372
- const client = new InferenceClient("{{ accessToken }}");
2373
-
2374
- const image = await client.textToImage({
2375
- provider: "{{ provider }}",
2376
- model: "{{ model.id }}",
2377
- inputs: {{ inputs.asObj.inputs }},
2378
- parameters: { num_inference_steps: 5 },
2379
- }{% if billTo %}, {
2380
- billTo: "{{ billTo }}",
2381
- }{% endif %});
2382
- /// Use the generated image (it's a Blob)`,
2383
- "textToSpeech": `import { InferenceClient } from "@huggingface/inference";
2384
-
2385
- const client = new InferenceClient("{{ accessToken }}");
2386
-
2387
- const audio = await client.textToSpeech({
2388
- provider: "{{ provider }}",
2389
- model: "{{ model.id }}",
2390
- inputs: {{ inputs.asObj.inputs }},
2391
- }{% if billTo %}, {
2392
- billTo: "{{ billTo }}",
2393
- }{% endif %});
2394
- // Use the generated audio (it's a Blob)`,
2395
- "textToVideo": `import { InferenceClient } from "@huggingface/inference";
2396
-
2397
- const client = new InferenceClient("{{ accessToken }}");
2398
-
2399
- const video = await client.textToVideo({
2400
- provider: "{{ provider }}",
2401
- model: "{{ model.id }}",
2402
- inputs: {{ inputs.asObj.inputs }},
2403
- }{% if billTo %}, {
2404
- billTo: "{{ billTo }}",
2405
- }{% endif %});
2406
- // Use the generated video (it's a Blob)`
2407
- },
2408
- "openai": {
2409
- "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n{% if billTo %}\n defaultHeaders: {\n "X-HF-Bill-To": "{{ billTo }}" \n }\n{% endif %}\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
2410
- "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n{% if billTo %}\n defaultHeaders: {\n "X-HF-Bill-To": "{{ billTo }}" \n }\n{% endif %}\n});\n\nconst stream = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n stream: true,\n});\n\nfor await (const chunk of stream) {\n process.stdout.write(chunk.choices[0]?.delta?.content || "");\n}'
2411
- }
2412
- },
2413
- "python": {
2414
- "fal_client": {
2415
- "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
2416
- },
2417
- "huggingface_hub": {
2418
- "basic": 'result = client.{{ methodName }}(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
2419
- "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
2420
- "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
2421
- "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
2422
- "conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
2423
- "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
2424
- "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
2425
- "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
2426
- "questionAnswering": 'answer = client.question_answering(\n question="{{ inputs.asObj.question }}",\n context="{{ inputs.asObj.context }}",\n model="{{ model.id }}",\n) ',
2427
- "tableQuestionAnswering": 'answer = client.question_answering(\n query="{{ inputs.asObj.query }}",\n table={{ inputs.asObj.table }},\n model="{{ model.id }}",\n) ',
2428
- "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
2429
- "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
2430
- "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
2431
- },
2432
- "openai": {
2433
- "conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n default_headers={\n "X-HF-Bill-To": "{{ billTo }}"\n }\n{% endif %}\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
2434
- "conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n default_headers={\n "X-HF-Bill-To": "{{ billTo }}"\n }\n{% endif %}\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
2435
- },
2436
- "requests": {
2437
- "basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
2438
- "basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
2439
- "basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
2440
- "conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
2441
- "conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
2442
- "documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
2443
- "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
2444
- "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {\n "Authorization": "{{ authorizationHeader }}",\n{% if billTo %}\n "X-HF-Bill-To": "{{ billTo }}"\n{% endif %}\n}',
2445
- "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
2446
- "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
2447
- "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
2448
- "textToSpeech": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "text": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
2449
- "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
2450
- "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
2451
- }
2452
- },
2453
- "sh": {
2454
- "curl": {
2455
- "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
2456
- "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n --data-binary @{{ providerInputs.asObj.inputs }}",
2457
- "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n --data-binary @{{ providerInputs.asObj.inputs }}",
2458
- "conversational": `curl {{ fullUrl }} \\
2459
- -H 'Authorization: {{ authorizationHeader }}' \\
2460
- -H 'Content-Type: application/json' \\
2461
- {% if billTo %}
2462
- -H 'X-HF-Bill-To: {{ billTo }}' \\
2463
- {% endif %}
2464
- -d '{
2465
- {{ providerInputs.asCurlString }},
2466
- "stream": false
2467
- }'`,
2468
- "conversationalStream": `curl {{ fullUrl }} \\
2469
- -H 'Authorization: {{ authorizationHeader }}' \\
2470
- -H 'Content-Type: application/json' \\
2471
- {% if billTo %}
2472
- -H 'X-HF-Bill-To: {{ billTo }}' \\
2473
- {% endif %}
2474
- -d '{
2475
- {{ providerInputs.asCurlString }},
2476
- "stream": true
2477
- }'`,
2478
- "zeroShotClassification": `curl {{ fullUrl }} \\
2479
- -X POST \\
2480
- -d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
2481
- -H 'Content-Type: application/json' \\
2482
- -H 'Authorization: {{ authorizationHeader }}'
2483
- {% if billTo %} \\
2484
- -H 'X-HF-Bill-To: {{ billTo }}'
2485
- {% endif %}`
2486
- }
2487
- }
2488
- };
2489
-
2490
- // src/snippets/getInferenceSnippets.ts
2491
- var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
2492
- var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
2493
- var SH_CLIENTS = ["curl"];
2494
- var CLIENTS = {
2495
- js: [...JS_CLIENTS],
2496
- python: [...PYTHON_CLIENTS],
2497
- sh: [...SH_CLIENTS]
2498
- };
2499
- var CLIENTS_AUTO_POLICY = {
2500
- js: ["huggingface.js"],
2501
- python: ["huggingface_hub"]
2502
- };
2503
- var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
2504
- var loadTemplate = (language, client, templateName) => {
2505
- const template = templates[language]?.[client]?.[templateName];
2506
- if (!template) {
2507
- throw new Error(`Template not found: ${language}/${client}/${templateName}`);
2508
- }
2509
- return (data) => new Template(template).render({ ...data });
2510
- };
2511
- var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
2512
- var snippetImportRequests = loadTemplate("python", "requests", "importRequests");
2513
- var HF_PYTHON_METHODS = {
2514
- "audio-classification": "audio_classification",
2515
- "audio-to-audio": "audio_to_audio",
2516
- "automatic-speech-recognition": "automatic_speech_recognition",
2517
- "document-question-answering": "document_question_answering",
2518
- "feature-extraction": "feature_extraction",
2519
- "fill-mask": "fill_mask",
2520
- "image-classification": "image_classification",
2521
- "image-segmentation": "image_segmentation",
2522
- "image-to-image": "image_to_image",
2523
- "image-to-text": "image_to_text",
2524
- "object-detection": "object_detection",
2525
- "question-answering": "question_answering",
2526
- "sentence-similarity": "sentence_similarity",
2527
- summarization: "summarization",
2528
- "table-question-answering": "table_question_answering",
2529
- "tabular-classification": "tabular_classification",
2530
- "tabular-regression": "tabular_regression",
2531
- "text-classification": "text_classification",
2532
- "text-generation": "text_generation",
2533
- "text-to-image": "text_to_image",
2534
- "text-to-speech": "text_to_speech",
2535
- "text-to-video": "text_to_video",
2536
- "token-classification": "token_classification",
2537
- translation: "translation",
2538
- "visual-question-answering": "visual_question_answering",
2539
- "zero-shot-classification": "zero_shot_classification",
2540
- "zero-shot-image-classification": "zero_shot_image_classification"
2541
- };
2542
- var HF_JS_METHODS = {
2543
- "automatic-speech-recognition": "automaticSpeechRecognition",
2544
- "feature-extraction": "featureExtraction",
2545
- "fill-mask": "fillMask",
2546
- "image-classification": "imageClassification",
2547
- "question-answering": "questionAnswering",
2548
- "sentence-similarity": "sentenceSimilarity",
2549
- summarization: "summarization",
2550
- "table-question-answering": "tableQuestionAnswering",
2551
- "text-classification": "textClassification",
2552
- "text-generation": "textGeneration",
2553
- "text2text-generation": "textGeneration",
2554
- "token-classification": "tokenClassification",
2555
- "text-to-speech": "textToSpeech",
2556
- translation: "translation"
2557
- };
2558
- var snippetGenerator = (templateName, inputPreparationFn) => {
2559
- return (model, accessToken, provider, inferenceProviderMapping, opts) => {
2560
- const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
2561
- let task = model.pipeline_tag;
2562
- if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
2563
- templateName = opts?.streaming ? "conversationalStream" : "conversational";
2564
- inputPreparationFn = prepareConversationalInput;
2565
- task = "conversational";
2566
- }
2567
- let providerHelper;
2568
- try {
2569
- providerHelper = getProviderHelper(provider, task);
2570
- } catch (e) {
2571
- console.error(`Failed to get provider helper for ${provider} (${task})`, e);
2572
- return [];
2573
- }
2574
- const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: getModelInputSnippet(model) };
2575
- const request2 = makeRequestOptionsFromResolvedModel(
2576
- providerModelId,
2577
- providerHelper,
2578
- {
2579
- accessToken,
2580
- provider,
2581
- ...inputs
2582
- },
2583
- inferenceProviderMapping,
2584
- {
2585
- task,
2586
- billTo: opts?.billTo
2587
- }
2588
- );
2589
- let providerInputs = inputs;
2590
- const bodyAsObj = request2.info.body;
2591
- if (typeof bodyAsObj === "string") {
2592
- try {
2593
- providerInputs = JSON.parse(bodyAsObj);
2594
- } catch (e) {
2595
- console.error("Failed to parse body as JSON", e);
2596
- }
2597
- }
2598
- const params = {
2599
- accessToken,
2600
- authorizationHeader: request2.info.headers?.Authorization,
2601
- baseUrl: removeSuffix(request2.url, "/chat/completions"),
2602
- fullUrl: request2.url,
2603
- inputs: {
2604
- asObj: inputs,
2605
- asCurlString: formatBody(inputs, "curl"),
2606
- asJsonString: formatBody(inputs, "json"),
2607
- asPythonString: formatBody(inputs, "python"),
2608
- asTsString: formatBody(inputs, "ts")
2609
- },
2610
- providerInputs: {
2611
- asObj: providerInputs,
2612
- asCurlString: formatBody(providerInputs, "curl"),
2613
- asJsonString: formatBody(providerInputs, "json"),
2614
- asPythonString: formatBody(providerInputs, "python"),
2615
- asTsString: formatBody(providerInputs, "ts")
2616
- },
2617
- model,
2618
- provider,
2619
- providerModelId: providerModelId ?? model.id,
2620
- billTo: opts?.billTo
2621
- };
2622
- const clients = provider === "auto" ? CLIENTS_AUTO_POLICY : CLIENTS;
2623
- return inferenceSnippetLanguages.map((language) => {
2624
- const langClients = clients[language] ?? [];
2625
- return langClients.map((client) => {
2626
- if (!hasTemplate(language, client, templateName)) {
2627
- return;
2628
- }
2629
- const template = loadTemplate(language, client, templateName);
2630
- if (client === "huggingface_hub" && templateName.includes("basic")) {
2631
- if (!(model.pipeline_tag && model.pipeline_tag in HF_PYTHON_METHODS)) {
2632
- return;
2633
- }
2634
- params["methodName"] = HF_PYTHON_METHODS[model.pipeline_tag];
2635
- }
2636
- if (client === "huggingface.js" && templateName.includes("basic")) {
2637
- if (!(model.pipeline_tag && model.pipeline_tag in HF_JS_METHODS)) {
2638
- return;
2639
- }
2640
- params["methodName"] = HF_JS_METHODS[model.pipeline_tag];
2641
- }
2642
- let snippet = template(params).trim();
2643
- if (!snippet) {
2644
- return;
2645
- }
2646
- if (client === "huggingface_hub") {
2647
- const importSection = snippetImportPythonInferenceClient({ ...params });
2648
- snippet = `${importSection}
2649
-
2650
- ${snippet}`;
2651
- } else if (client === "requests") {
2652
- const importSection = snippetImportRequests({
2653
- ...params,
2654
- importBase64: snippet.includes("base64"),
2655
- importJson: snippet.includes("json.")
2656
- });
2657
- snippet = `${importSection}
2658
-
2659
- ${snippet}`;
2660
- }
2661
- return { language, client, content: snippet };
2662
- }).filter((snippet) => snippet !== void 0);
2663
- }).flat();
2664
- };
2665
- };
2666
- var prepareDocumentQuestionAnsweringInput = (model) => {
2667
- return JSON.parse(getModelInputSnippet(model));
2668
- };
2669
- var prepareImageToImageInput = (model) => {
2670
- const data = JSON.parse(getModelInputSnippet(model));
2671
- return { inputs: data.image, parameters: { prompt: data.prompt } };
2672
- };
2673
- var prepareConversationalInput = (model, opts) => {
2674
- return {
2675
- messages: opts?.messages ?? getModelInputSnippet(model),
2676
- ...opts?.temperature ? { temperature: opts?.temperature } : void 0,
2677
- ...opts?.max_tokens ? { max_tokens: opts?.max_tokens } : void 0,
2678
- ...opts?.top_p ? { top_p: opts?.top_p } : void 0
2679
- };
2680
- };
2681
- var prepareQuestionAnsweringInput = (model) => {
2682
- const data = JSON.parse(getModelInputSnippet(model));
2683
- return { question: data.question, context: data.context };
2684
- };
2685
- var prepareTableQuestionAnsweringInput = (model) => {
2686
- const data = JSON.parse(getModelInputSnippet(model));
2687
- return { query: data.query, table: JSON.stringify(data.table) };
2688
- };
2689
- var snippets = {
2690
- "audio-classification": snippetGenerator("basicAudio"),
2691
- "audio-to-audio": snippetGenerator("basicAudio"),
2692
- "automatic-speech-recognition": snippetGenerator("basicAudio"),
2693
- "document-question-answering": snippetGenerator("documentQuestionAnswering", prepareDocumentQuestionAnsweringInput),
2694
- "feature-extraction": snippetGenerator("basic"),
2695
- "fill-mask": snippetGenerator("basic"),
2696
- "image-classification": snippetGenerator("basicImage"),
2697
- "image-segmentation": snippetGenerator("basicImage"),
2698
- "image-text-to-text": snippetGenerator("conversational"),
2699
- "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
2700
- "image-to-text": snippetGenerator("basicImage"),
2701
- "object-detection": snippetGenerator("basicImage"),
2702
- "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
2703
- "sentence-similarity": snippetGenerator("basic"),
2704
- summarization: snippetGenerator("basic"),
2705
- "tabular-classification": snippetGenerator("tabular"),
2706
- "tabular-regression": snippetGenerator("tabular"),
2707
- "table-question-answering": snippetGenerator("tableQuestionAnswering", prepareTableQuestionAnsweringInput),
2708
- "text-classification": snippetGenerator("basic"),
2709
- "text-generation": snippetGenerator("basic"),
2710
- "text-to-audio": snippetGenerator("textToAudio"),
2711
- "text-to-image": snippetGenerator("textToImage"),
2712
- "text-to-speech": snippetGenerator("textToSpeech"),
2713
- "text-to-video": snippetGenerator("textToVideo"),
2714
- "text2text-generation": snippetGenerator("basic"),
2715
- "token-classification": snippetGenerator("basic"),
2716
- translation: snippetGenerator("basic"),
2717
- "zero-shot-classification": snippetGenerator("zeroShotClassification"),
2718
- "zero-shot-image-classification": snippetGenerator("zeroShotImageClassification")
2719
- };
2720
- function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, opts) {
2721
- return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? [] : [];
2722
- }
2723
- function formatBody(obj, format) {
2724
- switch (format) {
2725
- case "curl":
2726
- return indentString(formatBody(obj, "json"));
2727
- case "json":
2728
- return JSON.stringify(obj, null, 4).split("\n").slice(1, -1).join("\n");
2729
- case "python":
2730
- return indentString(
2731
- Object.entries(obj).map(([key, value]) => {
2732
- const formattedValue = JSON.stringify(value, null, 4).replace(/"/g, '"');
2733
- return `${key}=${formattedValue},`;
2734
- }).join("\n")
2735
- );
2736
- case "ts":
2737
- return formatTsObject(obj).split("\n").slice(1, -1).join("\n");
2738
- default:
2739
- throw new Error(`Unsupported format: ${format}`);
2740
- }
2741
- }
2742
- function formatTsObject(obj, depth) {
2743
- depth = depth ?? 0;
2744
- if (typeof obj !== "object" || obj === null) {
2745
- return JSON.stringify(obj);
2746
- }
2747
- if (Array.isArray(obj)) {
2748
- const items = obj.map((item) => {
2749
- const formatted = formatTsObject(item, depth + 1);
2750
- return `${" ".repeat(4 * (depth + 1))}${formatted},`;
2751
- }).join("\n");
2752
- return `[
2753
- ${items}
2754
- ${" ".repeat(4 * depth)}]`;
2755
- }
2756
- const entries = Object.entries(obj);
2757
- const lines = entries.map(([key, value]) => {
2758
- const formattedValue = formatTsObject(value, depth + 1);
2759
- const keyStr = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/.test(key) ? key : `"${key}"`;
2760
- return `${" ".repeat(4 * (depth + 1))}${keyStr}: ${formattedValue},`;
2761
- }).join("\n");
2762
- return `{
2763
- ${lines}
2764
- ${" ".repeat(4 * depth)}}`;
2765
- }
2766
- function indentString(str) {
2767
- return str.split("\n").map((line) => " ".repeat(4) + line).join("\n");
2768
- }
2769
- function removeSuffix(str, suffix) {
2770
- return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
2771
- }
2772
- export {
2773
- HfInference,
2774
- INFERENCE_PROVIDERS,
2775
- InferenceClient,
2776
- InferenceClientEndpoint,
2777
- InferenceOutputError,
2778
- PROVIDERS_OR_POLICIES,
2779
- audioClassification,
2780
- audioToAudio,
2781
- automaticSpeechRecognition,
2782
- chatCompletion,
2783
- chatCompletionStream,
2784
- documentQuestionAnswering,
2785
- featureExtraction,
2786
- fillMask,
2787
- imageClassification,
2788
- imageSegmentation,
2789
- imageToImage,
2790
- imageToText,
2791
- objectDetection,
2792
- questionAnswering,
2793
- request,
2794
- sentenceSimilarity,
2795
- snippets_exports as snippets,
2796
- streamingRequest,
2797
- summarization,
2798
- tableQuestionAnswering,
2799
- tabularClassification,
2800
- tabularRegression,
2801
- textClassification,
2802
- textGeneration,
2803
- textGenerationStream,
2804
- textToImage,
2805
- textToSpeech,
2806
- textToVideo,
2807
- tokenClassification,
2808
- translation,
2809
- visualQuestionAnswering,
2810
- zeroShotClassification,
2811
- zeroShotImageClassification
2812
- };