@workglow/ai-provider 0.0.121 → 0.0.122

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (541)
  1. package/README.md +30 -67
  2. package/dist/common/HfModelSearch.d.ts +32 -0
  3. package/dist/common/HfModelSearch.d.ts.map +1 -0
  4. package/dist/common/PipelineTaskMapping.d.ts +12 -0
  5. package/dist/common/PipelineTaskMapping.d.ts.map +1 -0
  6. package/dist/{anthropic → provider-anthropic}/AnthropicProvider.d.ts +2 -14
  7. package/dist/provider-anthropic/AnthropicProvider.d.ts.map +1 -0
  8. package/dist/provider-anthropic/AnthropicQueuedProvider.d.ts +16 -0
  9. package/dist/provider-anthropic/AnthropicQueuedProvider.d.ts.map +1 -0
  10. package/dist/provider-anthropic/common/Anthropic_Client.d.ts +13 -0
  11. package/dist/provider-anthropic/common/Anthropic_Client.d.ts.map +1 -0
  12. package/dist/provider-anthropic/common/Anthropic_Constants.d.ts.map +1 -0
  13. package/dist/provider-anthropic/common/Anthropic_CountTokens.d.ts +10 -0
  14. package/dist/provider-anthropic/common/Anthropic_CountTokens.d.ts.map +1 -0
  15. package/dist/provider-anthropic/common/Anthropic_JobRunFns.d.ts +12 -0
  16. package/dist/provider-anthropic/common/Anthropic_JobRunFns.d.ts.map +1 -0
  17. package/dist/provider-anthropic/common/Anthropic_ModelInfo.d.ts +9 -0
  18. package/dist/provider-anthropic/common/Anthropic_ModelInfo.d.ts.map +1 -0
  19. package/dist/{anthropic → provider-anthropic}/common/Anthropic_ModelSchema.d.ts +31 -31
  20. package/dist/provider-anthropic/common/Anthropic_ModelSchema.d.ts.map +1 -0
  21. package/dist/provider-anthropic/common/Anthropic_ModelSearch.d.ts +8 -0
  22. package/dist/provider-anthropic/common/Anthropic_ModelSearch.d.ts.map +1 -0
  23. package/dist/provider-anthropic/common/Anthropic_StructuredGeneration.d.ts +10 -0
  24. package/dist/provider-anthropic/common/Anthropic_StructuredGeneration.d.ts.map +1 -0
  25. package/dist/provider-anthropic/common/Anthropic_TextGeneration.d.ts +10 -0
  26. package/dist/provider-anthropic/common/Anthropic_TextGeneration.d.ts.map +1 -0
  27. package/dist/provider-anthropic/common/Anthropic_TextRewriter.d.ts +10 -0
  28. package/dist/provider-anthropic/common/Anthropic_TextRewriter.d.ts.map +1 -0
  29. package/dist/provider-anthropic/common/Anthropic_TextSummary.d.ts +10 -0
  30. package/dist/provider-anthropic/common/Anthropic_TextSummary.d.ts.map +1 -0
  31. package/dist/provider-anthropic/common/Anthropic_ToolCalling.d.ts +10 -0
  32. package/dist/provider-anthropic/common/Anthropic_ToolCalling.d.ts.map +1 -0
  33. package/dist/{anthropic → provider-anthropic}/index.d.ts +1 -3
  34. package/dist/provider-anthropic/index.d.ts.map +1 -0
  35. package/dist/{index-60ev6k93.js → provider-anthropic/index.js} +43 -11
  36. package/dist/provider-anthropic/index.js.map +13 -0
  37. package/dist/provider-anthropic/registerAnthropic.d.ts +10 -0
  38. package/dist/provider-anthropic/registerAnthropic.d.ts.map +1 -0
  39. package/dist/provider-anthropic/registerAnthropicInline.d.ts +8 -0
  40. package/dist/provider-anthropic/registerAnthropicInline.d.ts.map +1 -0
  41. package/dist/provider-anthropic/registerAnthropicWorker.d.ts +7 -0
  42. package/dist/provider-anthropic/registerAnthropicWorker.d.ts.map +1 -0
  43. package/dist/provider-anthropic/runtime.d.ts +16 -0
  44. package/dist/provider-anthropic/runtime.d.ts.map +1 -0
  45. package/dist/{anthropic/index.js → provider-anthropic/runtime.js} +291 -177
  46. package/dist/provider-anthropic/runtime.js.map +24 -0
  47. package/dist/{web-browser → provider-chrome}/WebBrowserProvider.d.ts +2 -15
  48. package/dist/provider-chrome/WebBrowserProvider.d.ts.map +1 -0
  49. package/dist/provider-chrome/WebBrowserQueuedProvider.d.ts +16 -0
  50. package/dist/provider-chrome/WebBrowserQueuedProvider.d.ts.map +1 -0
  51. package/dist/provider-chrome/common/WebBrowser_ChromeHelpers.d.ts +31 -0
  52. package/dist/provider-chrome/common/WebBrowser_ChromeHelpers.d.ts.map +1 -0
  53. package/dist/provider-chrome/common/WebBrowser_Constants.d.ts.map +1 -0
  54. package/dist/provider-chrome/common/WebBrowser_JobRunFns.d.ts +10 -0
  55. package/dist/provider-chrome/common/WebBrowser_JobRunFns.d.ts.map +1 -0
  56. package/dist/provider-chrome/common/WebBrowser_ModelInfo.d.ts +9 -0
  57. package/dist/provider-chrome/common/WebBrowser_ModelInfo.d.ts.map +1 -0
  58. package/dist/{web-browser → provider-chrome}/common/WebBrowser_ModelSchema.d.ts +31 -31
  59. package/dist/provider-chrome/common/WebBrowser_ModelSchema.d.ts.map +1 -0
  60. package/dist/provider-chrome/common/WebBrowser_ModelSearch.d.ts +8 -0
  61. package/dist/provider-chrome/common/WebBrowser_ModelSearch.d.ts.map +1 -0
  62. package/dist/provider-chrome/common/WebBrowser_TextGeneration.d.ts +10 -0
  63. package/dist/provider-chrome/common/WebBrowser_TextGeneration.d.ts.map +1 -0
  64. package/dist/provider-chrome/common/WebBrowser_TextLanguageDetection.d.ts +9 -0
  65. package/dist/provider-chrome/common/WebBrowser_TextLanguageDetection.d.ts.map +1 -0
  66. package/dist/provider-chrome/common/WebBrowser_TextRewriter.d.ts +10 -0
  67. package/dist/provider-chrome/common/WebBrowser_TextRewriter.d.ts.map +1 -0
  68. package/dist/provider-chrome/common/WebBrowser_TextSummary.d.ts +10 -0
  69. package/dist/provider-chrome/common/WebBrowser_TextSummary.d.ts.map +1 -0
  70. package/dist/provider-chrome/common/WebBrowser_TextTranslation.d.ts +10 -0
  71. package/dist/provider-chrome/common/WebBrowser_TextTranslation.d.ts.map +1 -0
  72. package/dist/{web-browser → provider-chrome}/index.d.ts +1 -3
  73. package/dist/provider-chrome/index.d.ts.map +1 -0
  74. package/dist/provider-chrome/index.js +132 -0
  75. package/dist/provider-chrome/index.js.map +13 -0
  76. package/dist/provider-chrome/registerWebBrowser.d.ts +10 -0
  77. package/dist/provider-chrome/registerWebBrowser.d.ts.map +1 -0
  78. package/dist/provider-chrome/registerWebBrowserInline.d.ts +8 -0
  79. package/dist/provider-chrome/registerWebBrowserInline.d.ts.map +1 -0
  80. package/dist/provider-chrome/registerWebBrowserWorker.d.ts +7 -0
  81. package/dist/provider-chrome/registerWebBrowserWorker.d.ts.map +1 -0
  82. package/dist/provider-chrome/runtime.d.ts +14 -0
  83. package/dist/provider-chrome/runtime.d.ts.map +1 -0
  84. package/dist/{web-browser/index.js → provider-chrome/runtime.js} +260 -235
  85. package/dist/provider-chrome/runtime.js.map +23 -0
  86. package/dist/{google-gemini → provider-gemini}/GoogleGeminiProvider.d.ts +2 -15
  87. package/dist/provider-gemini/GoogleGeminiProvider.d.ts.map +1 -0
  88. package/dist/provider-gemini/GoogleGeminiQueuedProvider.d.ts +16 -0
  89. package/dist/provider-gemini/GoogleGeminiQueuedProvider.d.ts.map +1 -0
  90. package/dist/provider-gemini/common/Gemini_Client.d.ts +10 -0
  91. package/dist/provider-gemini/common/Gemini_Client.d.ts.map +1 -0
  92. package/dist/provider-gemini/common/Gemini_Constants.d.ts.map +1 -0
  93. package/dist/provider-gemini/common/Gemini_CountTokens.d.ts +10 -0
  94. package/dist/provider-gemini/common/Gemini_CountTokens.d.ts.map +1 -0
  95. package/dist/provider-gemini/common/Gemini_JobRunFns.d.ts +13 -0
  96. package/dist/provider-gemini/common/Gemini_JobRunFns.d.ts.map +1 -0
  97. package/dist/provider-gemini/common/Gemini_ModelInfo.d.ts +9 -0
  98. package/dist/provider-gemini/common/Gemini_ModelInfo.d.ts.map +1 -0
  99. package/dist/{google-gemini → provider-gemini}/common/Gemini_ModelSchema.d.ts +31 -31
  100. package/dist/provider-gemini/common/Gemini_ModelSchema.d.ts.map +1 -0
  101. package/dist/provider-gemini/common/Gemini_ModelSearch.d.ts +8 -0
  102. package/dist/provider-gemini/common/Gemini_ModelSearch.d.ts.map +1 -0
  103. package/dist/provider-gemini/common/Gemini_Schema.d.ts +11 -0
  104. package/dist/provider-gemini/common/Gemini_Schema.d.ts.map +1 -0
  105. package/dist/provider-gemini/common/Gemini_StructuredGeneration.d.ts +10 -0
  106. package/dist/provider-gemini/common/Gemini_StructuredGeneration.d.ts.map +1 -0
  107. package/dist/provider-gemini/common/Gemini_TextEmbedding.d.ts +9 -0
  108. package/dist/provider-gemini/common/Gemini_TextEmbedding.d.ts.map +1 -0
  109. package/dist/provider-gemini/common/Gemini_TextGeneration.d.ts +10 -0
  110. package/dist/provider-gemini/common/Gemini_TextGeneration.d.ts.map +1 -0
  111. package/dist/provider-gemini/common/Gemini_TextRewriter.d.ts +10 -0
  112. package/dist/provider-gemini/common/Gemini_TextRewriter.d.ts.map +1 -0
  113. package/dist/provider-gemini/common/Gemini_TextSummary.d.ts +10 -0
  114. package/dist/provider-gemini/common/Gemini_TextSummary.d.ts.map +1 -0
  115. package/dist/provider-gemini/common/Gemini_ToolCalling.d.ts +10 -0
  116. package/dist/provider-gemini/common/Gemini_ToolCalling.d.ts.map +1 -0
  117. package/dist/{google-gemini → provider-gemini}/index.d.ts +1 -3
  118. package/dist/provider-gemini/index.d.ts.map +1 -0
  119. package/dist/{index-8651nz8y.js → provider-gemini/index.js} +43 -11
  120. package/dist/provider-gemini/index.js.map +13 -0
  121. package/dist/provider-gemini/registerGemini.d.ts +10 -0
  122. package/dist/provider-gemini/registerGemini.d.ts.map +1 -0
  123. package/dist/provider-gemini/registerGeminiInline.d.ts +8 -0
  124. package/dist/provider-gemini/registerGeminiInline.d.ts.map +1 -0
  125. package/dist/{anthropic/Anthropic_Worker.d.ts → provider-gemini/registerGeminiWorker.d.ts} +2 -2
  126. package/dist/provider-gemini/registerGeminiWorker.d.ts.map +1 -0
  127. package/dist/provider-gemini/runtime.d.ts +16 -0
  128. package/dist/provider-gemini/runtime.d.ts.map +1 -0
  129. package/dist/{google-gemini/index.js → provider-gemini/runtime.js} +281 -173
  130. package/dist/provider-gemini/runtime.js.map +26 -0
  131. package/dist/provider-hf-inference/HfInferenceProvider.d.ts +2 -19
  132. package/dist/provider-hf-inference/HfInferenceProvider.d.ts.map +1 -1
  133. package/dist/provider-hf-inference/HfInferenceQueuedProvider.d.ts +16 -0
  134. package/dist/provider-hf-inference/HfInferenceQueuedProvider.d.ts.map +1 -0
  135. package/dist/provider-hf-inference/common/HFI_Client.d.ts +12 -0
  136. package/dist/provider-hf-inference/common/HFI_Client.d.ts.map +1 -0
  137. package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts +2 -11
  138. package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts.map +1 -1
  139. package/dist/provider-hf-inference/common/HFI_ModelInfo.d.ts +9 -0
  140. package/dist/provider-hf-inference/common/HFI_ModelInfo.d.ts.map +1 -0
  141. package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts +31 -31
  142. package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts.map +1 -1
  143. package/dist/provider-hf-inference/common/HFI_ModelSearch.d.ts +8 -0
  144. package/dist/provider-hf-inference/common/HFI_ModelSearch.d.ts.map +1 -0
  145. package/dist/provider-hf-inference/common/HFI_TextEmbedding.d.ts +9 -0
  146. package/dist/provider-hf-inference/common/HFI_TextEmbedding.d.ts.map +1 -0
  147. package/dist/provider-hf-inference/common/HFI_TextGeneration.d.ts +10 -0
  148. package/dist/provider-hf-inference/common/HFI_TextGeneration.d.ts.map +1 -0
  149. package/dist/provider-hf-inference/common/HFI_TextRewriter.d.ts +10 -0
  150. package/dist/provider-hf-inference/common/HFI_TextRewriter.d.ts.map +1 -0
  151. package/dist/provider-hf-inference/common/HFI_TextSummary.d.ts +10 -0
  152. package/dist/provider-hf-inference/common/HFI_TextSummary.d.ts.map +1 -0
  153. package/dist/provider-hf-inference/common/HFI_ToolCalling.d.ts +10 -0
  154. package/dist/provider-hf-inference/common/HFI_ToolCalling.d.ts.map +1 -0
  155. package/dist/provider-hf-inference/index.d.ts +1 -3
  156. package/dist/provider-hf-inference/index.d.ts.map +1 -1
  157. package/dist/provider-hf-inference/index.js +98 -411
  158. package/dist/provider-hf-inference/index.js.map +7 -5
  159. package/dist/provider-hf-inference/registerHfInference.d.ts +10 -0
  160. package/dist/provider-hf-inference/registerHfInference.d.ts.map +1 -0
  161. package/dist/provider-hf-inference/registerHfInferenceInline.d.ts +8 -0
  162. package/dist/provider-hf-inference/registerHfInferenceInline.d.ts.map +1 -0
  163. package/dist/provider-hf-inference/registerHfInferenceWorker.d.ts +7 -0
  164. package/dist/provider-hf-inference/registerHfInferenceWorker.d.ts.map +1 -0
  165. package/dist/provider-hf-inference/runtime.d.ts +16 -0
  166. package/dist/provider-hf-inference/runtime.d.ts.map +1 -0
  167. package/dist/provider-hf-inference/runtime.js +592 -0
  168. package/dist/provider-hf-inference/runtime.js.map +25 -0
  169. package/dist/{hf-transformers → provider-hf-transformers}/HuggingFaceTransformersProvider.d.ts +2 -21
  170. package/dist/provider-hf-transformers/HuggingFaceTransformersProvider.d.ts.map +1 -0
  171. package/dist/provider-hf-transformers/HuggingFaceTransformersQueuedProvider.d.ts +16 -0
  172. package/dist/provider-hf-transformers/HuggingFaceTransformersQueuedProvider.d.ts.map +1 -0
  173. package/dist/provider-hf-transformers/common/HFT_BackgroundRemoval.d.ts +12 -0
  174. package/dist/provider-hf-transformers/common/HFT_BackgroundRemoval.d.ts.map +1 -0
  175. package/dist/{hf-transformers → provider-hf-transformers}/common/HFT_Constants.d.ts +25 -23
  176. package/dist/provider-hf-transformers/common/HFT_Constants.d.ts.map +1 -0
  177. package/dist/provider-hf-transformers/common/HFT_CountTokens.d.ts +10 -0
  178. package/dist/provider-hf-transformers/common/HFT_CountTokens.d.ts.map +1 -0
  179. package/dist/provider-hf-transformers/common/HFT_Download.d.ts +13 -0
  180. package/dist/provider-hf-transformers/common/HFT_Download.d.ts.map +1 -0
  181. package/dist/provider-hf-transformers/common/HFT_ImageClassification.d.ts +13 -0
  182. package/dist/provider-hf-transformers/common/HFT_ImageClassification.d.ts.map +1 -0
  183. package/dist/provider-hf-transformers/common/HFT_ImageEmbedding.d.ts +12 -0
  184. package/dist/provider-hf-transformers/common/HFT_ImageEmbedding.d.ts.map +1 -0
  185. package/dist/provider-hf-transformers/common/HFT_ImageHelpers.d.ts +11 -0
  186. package/dist/provider-hf-transformers/common/HFT_ImageHelpers.d.ts.map +1 -0
  187. package/dist/provider-hf-transformers/common/HFT_ImageSegmentation.d.ts +12 -0
  188. package/dist/provider-hf-transformers/common/HFT_ImageSegmentation.d.ts.map +1 -0
  189. package/dist/provider-hf-transformers/common/HFT_ImageToText.d.ts +12 -0
  190. package/dist/provider-hf-transformers/common/HFT_ImageToText.d.ts.map +1 -0
  191. package/dist/provider-hf-transformers/common/HFT_InlineLifecycle.d.ts +7 -0
  192. package/dist/provider-hf-transformers/common/HFT_InlineLifecycle.d.ts.map +1 -0
  193. package/dist/{hf-transformers → provider-hf-transformers}/common/HFT_JobRunFns.d.ts +673 -789
  194. package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts.map +1 -0
  195. package/dist/provider-hf-transformers/common/HFT_ModelInfo.d.ts +9 -0
  196. package/dist/provider-hf-transformers/common/HFT_ModelInfo.d.ts.map +1 -0
  197. package/dist/{hf-transformers → provider-hf-transformers}/common/HFT_ModelSchema.d.ts +37 -37
  198. package/dist/provider-hf-transformers/common/HFT_ModelSchema.d.ts.map +1 -0
  199. package/dist/provider-hf-transformers/common/HFT_ModelSearch.d.ts +8 -0
  200. package/dist/provider-hf-transformers/common/HFT_ModelSearch.d.ts.map +1 -0
  201. package/dist/provider-hf-transformers/common/HFT_ObjectDetection.d.ts +13 -0
  202. package/dist/provider-hf-transformers/common/HFT_ObjectDetection.d.ts.map +1 -0
  203. package/dist/provider-hf-transformers/common/HFT_OnnxDtypes.d.ts +23 -0
  204. package/dist/provider-hf-transformers/common/HFT_OnnxDtypes.d.ts.map +1 -0
  205. package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts +32 -0
  206. package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts.map +1 -0
  207. package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts +24 -0
  208. package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts.map +1 -0
  209. package/dist/provider-hf-transformers/common/HFT_StructuredGeneration.d.ts +10 -0
  210. package/dist/provider-hf-transformers/common/HFT_StructuredGeneration.d.ts.map +1 -0
  211. package/dist/provider-hf-transformers/common/HFT_TextClassification.d.ts +9 -0
  212. package/dist/provider-hf-transformers/common/HFT_TextClassification.d.ts.map +1 -0
  213. package/dist/provider-hf-transformers/common/HFT_TextEmbedding.d.ts +13 -0
  214. package/dist/provider-hf-transformers/common/HFT_TextEmbedding.d.ts.map +1 -0
  215. package/dist/provider-hf-transformers/common/HFT_TextFillMask.d.ts +9 -0
  216. package/dist/provider-hf-transformers/common/HFT_TextFillMask.d.ts.map +1 -0
  217. package/dist/provider-hf-transformers/common/HFT_TextGeneration.d.ts +14 -0
  218. package/dist/provider-hf-transformers/common/HFT_TextGeneration.d.ts.map +1 -0
  219. package/dist/provider-hf-transformers/common/HFT_TextLanguageDetection.d.ts +9 -0
  220. package/dist/provider-hf-transformers/common/HFT_TextLanguageDetection.d.ts.map +1 -0
  221. package/dist/provider-hf-transformers/common/HFT_TextNamedEntityRecognition.d.ts +9 -0
  222. package/dist/provider-hf-transformers/common/HFT_TextNamedEntityRecognition.d.ts.map +1 -0
  223. package/dist/provider-hf-transformers/common/HFT_TextOutput.d.ts +8 -0
  224. package/dist/provider-hf-transformers/common/HFT_TextOutput.d.ts.map +1 -0
  225. package/dist/provider-hf-transformers/common/HFT_TextQuestionAnswer.d.ts +14 -0
  226. package/dist/provider-hf-transformers/common/HFT_TextQuestionAnswer.d.ts.map +1 -0
  227. package/dist/provider-hf-transformers/common/HFT_TextRewriter.d.ts +14 -0
  228. package/dist/provider-hf-transformers/common/HFT_TextRewriter.d.ts.map +1 -0
  229. package/dist/provider-hf-transformers/common/HFT_TextSummary.d.ts +14 -0
  230. package/dist/provider-hf-transformers/common/HFT_TextSummary.d.ts.map +1 -0
  231. package/dist/provider-hf-transformers/common/HFT_TextTranslation.d.ts +14 -0
  232. package/dist/provider-hf-transformers/common/HFT_TextTranslation.d.ts.map +1 -0
  233. package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts +10 -0
  234. package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts.map +1 -0
  235. package/dist/provider-hf-transformers/common/HFT_ToolMarkup.d.ts +40 -0
  236. package/dist/provider-hf-transformers/common/HFT_ToolMarkup.d.ts.map +1 -0
  237. package/dist/provider-hf-transformers/common/HFT_Unload.d.ts +13 -0
  238. package/dist/provider-hf-transformers/common/HFT_Unload.d.ts.map +1 -0
  239. package/dist/{hf-transformers → provider-hf-transformers}/index.d.ts +4 -2
  240. package/dist/provider-hf-transformers/index.d.ts.map +1 -0
  241. package/dist/provider-hf-transformers/index.js +513 -0
  242. package/dist/provider-hf-transformers/index.js.map +16 -0
  243. package/dist/provider-hf-transformers/registerHuggingFaceTransformers.d.ts +14 -0
  244. package/dist/provider-hf-transformers/registerHuggingFaceTransformers.d.ts.map +1 -0
  245. package/dist/provider-hf-transformers/registerHuggingFaceTransformersInline.d.ts +15 -0
  246. package/dist/provider-hf-transformers/registerHuggingFaceTransformersInline.d.ts.map +1 -0
  247. package/dist/provider-hf-transformers/registerHuggingFaceTransformersWorker.d.ts +7 -0
  248. package/dist/provider-hf-transformers/registerHuggingFaceTransformersWorker.d.ts.map +1 -0
  249. package/dist/provider-hf-transformers/runtime.d.ts +21 -0
  250. package/dist/provider-hf-transformers/runtime.d.ts.map +1 -0
  251. package/dist/{index-j4g81r4k.js → provider-hf-transformers/runtime.js} +1561 -927
  252. package/dist/provider-hf-transformers/runtime.js.map +49 -0
  253. package/dist/provider-llamacpp/LlamaCppProvider.d.ts +2 -15
  254. package/dist/provider-llamacpp/LlamaCppProvider.d.ts.map +1 -1
  255. package/dist/provider-llamacpp/LlamaCppQueuedProvider.d.ts +16 -0
  256. package/dist/provider-llamacpp/LlamaCppQueuedProvider.d.ts.map +1 -0
  257. package/dist/provider-llamacpp/common/LlamaCpp_CountTokens.d.ts +10 -0
  258. package/dist/provider-llamacpp/common/LlamaCpp_CountTokens.d.ts.map +1 -0
  259. package/dist/provider-llamacpp/common/LlamaCpp_Download.d.ts +9 -0
  260. package/dist/provider-llamacpp/common/LlamaCpp_Download.d.ts.map +1 -0
  261. package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts +2 -18
  262. package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts.map +1 -1
  263. package/dist/provider-llamacpp/common/LlamaCpp_ModelInfo.d.ts +9 -0
  264. package/dist/provider-llamacpp/common/LlamaCpp_ModelInfo.d.ts.map +1 -0
  265. package/dist/provider-llamacpp/common/LlamaCpp_ModelSchema.d.ts +31 -31
  266. package/dist/provider-llamacpp/common/LlamaCpp_ModelSchema.d.ts.map +1 -1
  267. package/dist/provider-llamacpp/common/LlamaCpp_ModelSearch.d.ts +8 -0
  268. package/dist/provider-llamacpp/common/LlamaCpp_ModelSearch.d.ts.map +1 -0
  269. package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts +31 -0
  270. package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts.map +1 -0
  271. package/dist/provider-llamacpp/common/LlamaCpp_StructuredGeneration.d.ts +10 -0
  272. package/dist/provider-llamacpp/common/LlamaCpp_StructuredGeneration.d.ts.map +1 -0
  273. package/dist/provider-llamacpp/common/LlamaCpp_TextEmbedding.d.ts +9 -0
  274. package/dist/provider-llamacpp/common/LlamaCpp_TextEmbedding.d.ts.map +1 -0
  275. package/dist/provider-llamacpp/common/LlamaCpp_TextGeneration.d.ts +10 -0
  276. package/dist/provider-llamacpp/common/LlamaCpp_TextGeneration.d.ts.map +1 -0
  277. package/dist/provider-llamacpp/common/LlamaCpp_TextRewriter.d.ts +10 -0
  278. package/dist/provider-llamacpp/common/LlamaCpp_TextRewriter.d.ts.map +1 -0
  279. package/dist/provider-llamacpp/common/LlamaCpp_TextSummary.d.ts +10 -0
  280. package/dist/provider-llamacpp/common/LlamaCpp_TextSummary.d.ts.map +1 -0
  281. package/dist/provider-llamacpp/common/LlamaCpp_ToolCalling.d.ts +10 -0
  282. package/dist/provider-llamacpp/common/LlamaCpp_ToolCalling.d.ts.map +1 -0
  283. package/dist/provider-llamacpp/common/LlamaCpp_Unload.d.ts +9 -0
  284. package/dist/provider-llamacpp/common/LlamaCpp_Unload.d.ts.map +1 -0
  285. package/dist/provider-llamacpp/index.d.ts +1 -3
  286. package/dist/provider-llamacpp/index.d.ts.map +1 -1
  287. package/dist/provider-llamacpp/index.js +121 -725
  288. package/dist/provider-llamacpp/index.js.map +7 -5
  289. package/dist/provider-llamacpp/registerLlamaCpp.d.ts +10 -0
  290. package/dist/provider-llamacpp/registerLlamaCpp.d.ts.map +1 -0
  291. package/dist/provider-llamacpp/registerLlamaCppInline.d.ts +8 -0
  292. package/dist/provider-llamacpp/registerLlamaCppInline.d.ts.map +1 -0
  293. package/dist/provider-llamacpp/registerLlamaCppWorker.d.ts +7 -0
  294. package/dist/provider-llamacpp/registerLlamaCppWorker.d.ts.map +1 -0
  295. package/dist/provider-llamacpp/runtime.d.ts +16 -0
  296. package/dist/provider-llamacpp/runtime.d.ts.map +1 -0
  297. package/dist/provider-llamacpp/runtime.js +929 -0
  298. package/dist/provider-llamacpp/runtime.js.map +29 -0
  299. package/dist/provider-ollama/OllamaProvider.d.ts +2 -15
  300. package/dist/provider-ollama/OllamaProvider.d.ts.map +1 -1
  301. package/dist/provider-ollama/OllamaQueuedProvider.d.ts +16 -0
  302. package/dist/provider-ollama/OllamaQueuedProvider.d.ts.map +1 -0
  303. package/dist/provider-ollama/common/Ollama_Client.browser.d.ts +13 -0
  304. package/dist/provider-ollama/common/Ollama_Client.browser.d.ts.map +1 -0
  305. package/dist/provider-ollama/common/Ollama_Client.d.ts +13 -0
  306. package/dist/provider-ollama/common/Ollama_Client.d.ts.map +1 -0
  307. package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts +362 -11
  308. package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts.map +1 -1
  309. package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts +361 -11
  310. package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts.map +1 -1
  311. package/dist/provider-ollama/common/Ollama_ModelInfo.d.ts +11 -0
  312. package/dist/provider-ollama/common/Ollama_ModelInfo.d.ts.map +1 -0
  313. package/dist/provider-ollama/common/Ollama_ModelSchema.d.ts +30 -30
  314. package/dist/provider-ollama/common/Ollama_ModelSchema.d.ts.map +1 -1
  315. package/dist/provider-ollama/common/Ollama_ModelSearch.d.ts +11 -0
  316. package/dist/provider-ollama/common/Ollama_ModelSearch.d.ts.map +1 -0
  317. package/dist/provider-ollama/common/Ollama_ModelUtil.d.ts +8 -0
  318. package/dist/provider-ollama/common/Ollama_ModelUtil.d.ts.map +1 -0
  319. package/dist/provider-ollama/common/Ollama_TextEmbedding.d.ts +11 -0
  320. package/dist/provider-ollama/common/Ollama_TextEmbedding.d.ts.map +1 -0
  321. package/dist/provider-ollama/common/Ollama_TextGeneration.d.ts +12 -0
  322. package/dist/provider-ollama/common/Ollama_TextGeneration.d.ts.map +1 -0
  323. package/dist/provider-ollama/common/Ollama_TextRewriter.d.ts +12 -0
  324. package/dist/provider-ollama/common/Ollama_TextRewriter.d.ts.map +1 -0
  325. package/dist/provider-ollama/common/Ollama_TextSummary.d.ts +12 -0
  326. package/dist/provider-ollama/common/Ollama_TextSummary.d.ts.map +1 -0
  327. package/dist/provider-ollama/common/Ollama_ToolCalling.d.ts +16 -0
  328. package/dist/provider-ollama/common/Ollama_ToolCalling.d.ts.map +1 -0
  329. package/dist/provider-ollama/index.browser.d.ts +1 -3
  330. package/dist/provider-ollama/index.browser.d.ts.map +1 -1
  331. package/dist/provider-ollama/index.browser.js +18 -396
  332. package/dist/provider-ollama/index.browser.js.map +6 -7
  333. package/dist/provider-ollama/index.d.ts +1 -3
  334. package/dist/provider-ollama/index.d.ts.map +1 -1
  335. package/dist/provider-ollama/index.js +93 -382
  336. package/dist/provider-ollama/index.js.map +7 -5
  337. package/dist/provider-ollama/registerOllama.d.ts +10 -0
  338. package/dist/provider-ollama/registerOllama.d.ts.map +1 -0
  339. package/dist/provider-ollama/registerOllamaInline.browser.d.ts +8 -0
  340. package/dist/provider-ollama/registerOllamaInline.browser.d.ts.map +1 -0
  341. package/dist/provider-ollama/registerOllamaInline.d.ts +8 -0
  342. package/dist/provider-ollama/registerOllamaInline.d.ts.map +1 -0
  343. package/dist/provider-ollama/registerOllamaWorker.browser.d.ts +7 -0
  344. package/dist/provider-ollama/registerOllamaWorker.browser.d.ts.map +1 -0
  345. package/dist/{google-gemini/Gemini_Worker.d.ts → provider-ollama/registerOllamaWorker.d.ts} +2 -2
  346. package/dist/provider-ollama/registerOllamaWorker.d.ts.map +1 -0
  347. package/dist/provider-ollama/runtime.browser.d.ts +16 -0
  348. package/dist/provider-ollama/runtime.browser.d.ts.map +1 -0
  349. package/dist/provider-ollama/runtime.browser.js +528 -0
  350. package/dist/provider-ollama/runtime.browser.js.map +24 -0
  351. package/dist/provider-ollama/runtime.d.ts +16 -0
  352. package/dist/provider-ollama/runtime.d.ts.map +1 -0
  353. package/dist/provider-ollama/runtime.js +538 -0
  354. package/dist/provider-ollama/runtime.js.map +24 -0
  355. package/dist/provider-openai/OpenAiProvider.d.ts +2 -19
  356. package/dist/provider-openai/OpenAiProvider.d.ts.map +1 -1
  357. package/dist/provider-openai/OpenAiQueuedProvider.d.ts +16 -0
  358. package/dist/provider-openai/OpenAiQueuedProvider.d.ts.map +1 -0
  359. package/dist/provider-openai/common/OpenAI_Client.d.ts +10 -0
  360. package/dist/provider-openai/common/OpenAI_Client.d.ts.map +1 -0
  361. package/dist/provider-openai/common/OpenAI_CountTokens.d.ts +10 -0
  362. package/dist/provider-openai/common/OpenAI_CountTokens.d.ts.map +1 -0
  363. package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts +2 -15
  364. package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts.map +1 -1
  365. package/dist/provider-openai/common/OpenAI_ModelInfo.d.ts +9 -0
  366. package/dist/provider-openai/common/OpenAI_ModelInfo.d.ts.map +1 -0
  367. package/dist/provider-openai/common/OpenAI_ModelSchema.d.ts +31 -31
  368. package/dist/provider-openai/common/OpenAI_ModelSchema.d.ts.map +1 -1
  369. package/dist/provider-openai/common/OpenAI_ModelSearch.d.ts +8 -0
  370. package/dist/provider-openai/common/OpenAI_ModelSearch.d.ts.map +1 -0
  371. package/dist/provider-openai/common/OpenAI_StructuredGeneration.d.ts +10 -0
  372. package/dist/provider-openai/common/OpenAI_StructuredGeneration.d.ts.map +1 -0
  373. package/dist/provider-openai/common/OpenAI_TextEmbedding.d.ts +9 -0
  374. package/dist/provider-openai/common/OpenAI_TextEmbedding.d.ts.map +1 -0
  375. package/dist/provider-openai/common/OpenAI_TextGeneration.d.ts +10 -0
  376. package/dist/provider-openai/common/OpenAI_TextGeneration.d.ts.map +1 -0
  377. package/dist/provider-openai/common/OpenAI_TextRewriter.d.ts +10 -0
  378. package/dist/provider-openai/common/OpenAI_TextRewriter.d.ts.map +1 -0
  379. package/dist/provider-openai/common/OpenAI_TextSummary.d.ts +10 -0
  380. package/dist/provider-openai/common/OpenAI_TextSummary.d.ts.map +1 -0
  381. package/dist/provider-openai/common/OpenAI_ToolCalling.d.ts +10 -0
  382. package/dist/provider-openai/common/OpenAI_ToolCalling.d.ts.map +1 -0
  383. package/dist/provider-openai/index.d.ts +1 -3
  384. package/dist/provider-openai/index.d.ts.map +1 -1
  385. package/dist/provider-openai/index.js +108 -519
  386. package/dist/provider-openai/index.js.map +7 -5
  387. package/dist/provider-openai/registerOpenAi.d.ts +10 -0
  388. package/dist/provider-openai/registerOpenAi.d.ts.map +1 -0
  389. package/dist/provider-openai/registerOpenAiInline.d.ts +8 -0
  390. package/dist/provider-openai/registerOpenAiInline.d.ts.map +1 -0
  391. package/dist/{ggml/model/GgmlLocalModel.d.ts → provider-openai/registerOpenAiWorker.d.ts} +2 -2
  392. package/dist/provider-openai/registerOpenAiWorker.d.ts.map +1 -0
  393. package/dist/provider-openai/runtime.d.ts +16 -0
  394. package/dist/provider-openai/runtime.d.ts.map +1 -0
  395. package/dist/provider-openai/runtime.js +662 -0
  396. package/dist/provider-openai/runtime.js.map +25 -0
  397. package/dist/provider-tf-mediapipe/TensorFlowMediaPipeProvider.d.ts +24 -0
  398. package/dist/provider-tf-mediapipe/TensorFlowMediaPipeProvider.d.ts.map +1 -0
  399. package/dist/provider-tf-mediapipe/TensorFlowMediaPipeQueuedProvider.d.ts +16 -0
  400. package/dist/provider-tf-mediapipe/TensorFlowMediaPipeQueuedProvider.d.ts.map +1 -0
  401. package/dist/provider-tf-mediapipe/common/TFMP_Client.d.ts +8 -0
  402. package/dist/provider-tf-mediapipe/common/TFMP_Client.d.ts.map +1 -0
  403. package/dist/{tf-mediapipe → provider-tf-mediapipe}/common/TFMP_Constants.d.ts +1 -0
  404. package/dist/provider-tf-mediapipe/common/TFMP_Constants.d.ts.map +1 -0
  405. package/dist/provider-tf-mediapipe/common/TFMP_Download.d.ts +9 -0
  406. package/dist/provider-tf-mediapipe/common/TFMP_Download.d.ts.map +1 -0
  407. package/dist/provider-tf-mediapipe/common/TFMP_FaceDetector.d.ts +9 -0
  408. package/dist/provider-tf-mediapipe/common/TFMP_FaceDetector.d.ts.map +1 -0
  409. package/dist/provider-tf-mediapipe/common/TFMP_FaceLandmarker.d.ts +9 -0
  410. package/dist/provider-tf-mediapipe/common/TFMP_FaceLandmarker.d.ts.map +1 -0
  411. package/dist/provider-tf-mediapipe/common/TFMP_GestureRecognizer.d.ts +9 -0
  412. package/dist/provider-tf-mediapipe/common/TFMP_GestureRecognizer.d.ts.map +1 -0
  413. package/dist/provider-tf-mediapipe/common/TFMP_HandLandmarker.d.ts +9 -0
  414. package/dist/provider-tf-mediapipe/common/TFMP_HandLandmarker.d.ts.map +1 -0
  415. package/dist/provider-tf-mediapipe/common/TFMP_ImageClassification.d.ts +9 -0
  416. package/dist/provider-tf-mediapipe/common/TFMP_ImageClassification.d.ts.map +1 -0
  417. package/dist/provider-tf-mediapipe/common/TFMP_ImageEmbedding.d.ts +9 -0
  418. package/dist/provider-tf-mediapipe/common/TFMP_ImageEmbedding.d.ts.map +1 -0
  419. package/dist/provider-tf-mediapipe/common/TFMP_ImageSegmentation.d.ts +9 -0
  420. package/dist/provider-tf-mediapipe/common/TFMP_ImageSegmentation.d.ts.map +1 -0
  421. package/dist/{tf-mediapipe → provider-tf-mediapipe}/common/TFMP_JobRunFns.d.ts +340 -408
  422. package/dist/provider-tf-mediapipe/common/TFMP_JobRunFns.d.ts.map +1 -0
  423. package/dist/provider-tf-mediapipe/common/TFMP_ModelInfo.d.ts +9 -0
  424. package/dist/provider-tf-mediapipe/common/TFMP_ModelInfo.d.ts.map +1 -0
  425. package/dist/{tf-mediapipe → provider-tf-mediapipe}/common/TFMP_ModelSchema.d.ts +40 -37
  426. package/dist/provider-tf-mediapipe/common/TFMP_ModelSchema.d.ts.map +1 -0
  427. package/dist/provider-tf-mediapipe/common/TFMP_ModelSearch.d.ts +9 -0
  428. package/dist/provider-tf-mediapipe/common/TFMP_ModelSearch.d.ts.map +1 -0
  429. package/dist/provider-tf-mediapipe/common/TFMP_ObjectDetection.d.ts +9 -0
  430. package/dist/provider-tf-mediapipe/common/TFMP_ObjectDetection.d.ts.map +1 -0
  431. package/dist/provider-tf-mediapipe/common/TFMP_PoseLandmarker.d.ts +9 -0
  432. package/dist/provider-tf-mediapipe/common/TFMP_PoseLandmarker.d.ts.map +1 -0
  433. package/dist/provider-tf-mediapipe/common/TFMP_Runtime.d.ts +43 -0
  434. package/dist/provider-tf-mediapipe/common/TFMP_Runtime.d.ts.map +1 -0
  435. package/dist/provider-tf-mediapipe/common/TFMP_TextClassification.d.ts +9 -0
  436. package/dist/provider-tf-mediapipe/common/TFMP_TextClassification.d.ts.map +1 -0
  437. package/dist/provider-tf-mediapipe/common/TFMP_TextEmbedding.d.ts +9 -0
  438. package/dist/provider-tf-mediapipe/common/TFMP_TextEmbedding.d.ts.map +1 -0
  439. package/dist/provider-tf-mediapipe/common/TFMP_TextLanguageDetection.d.ts +9 -0
  440. package/dist/provider-tf-mediapipe/common/TFMP_TextLanguageDetection.d.ts.map +1 -0
  441. package/dist/provider-tf-mediapipe/common/TFMP_Unload.d.ts +9 -0
  442. package/dist/provider-tf-mediapipe/common/TFMP_Unload.d.ts.map +1 -0
  443. package/dist/{tf-mediapipe → provider-tf-mediapipe}/index.d.ts +1 -3
  444. package/dist/provider-tf-mediapipe/index.d.ts.map +1 -0
  445. package/dist/provider-tf-mediapipe/index.js +129 -0
  446. package/dist/provider-tf-mediapipe/index.js.map +13 -0
  447. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipe.d.ts +10 -0
  448. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipe.d.ts.map +1 -0
  449. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipeInline.d.ts +8 -0
  450. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipeInline.d.ts.map +1 -0
  451. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipeWorker.d.ts +7 -0
  452. package/dist/provider-tf-mediapipe/registerTensorFlowMediaPipeWorker.d.ts.map +1 -0
  453. package/dist/provider-tf-mediapipe/runtime.d.ts +16 -0
  454. package/dist/provider-tf-mediapipe/runtime.d.ts.map +1 -0
  455. package/dist/{tf-mediapipe/index.js → provider-tf-mediapipe/runtime.js} +417 -380
  456. package/dist/provider-tf-mediapipe/runtime.js.map +33 -0
  457. package/package.json +66 -40
  458. package/dist/HFT_JobRunFns-8hcpea4c.js +0 -80
  459. package/dist/HFT_JobRunFns-8hcpea4c.js.map +0 -9
  460. package/dist/anthropic/AnthropicProvider.d.ts.map +0 -1
  461. package/dist/anthropic/Anthropic_Worker.d.ts.map +0 -1
  462. package/dist/anthropic/common/Anthropic_Constants.d.ts.map +0 -1
  463. package/dist/anthropic/common/Anthropic_JobRunFns.d.ts +0 -24
  464. package/dist/anthropic/common/Anthropic_JobRunFns.d.ts.map +0 -1
  465. package/dist/anthropic/common/Anthropic_ModelSchema.d.ts.map +0 -1
  466. package/dist/anthropic/index.d.ts.map +0 -1
  467. package/dist/anthropic/index.js.map +0 -11
  468. package/dist/ggml/model/GgmlLocalModel.d.ts.map +0 -1
  469. package/dist/google-gemini/Gemini_Worker.d.ts.map +0 -1
  470. package/dist/google-gemini/GoogleGeminiProvider.d.ts.map +0 -1
  471. package/dist/google-gemini/common/Gemini_Constants.d.ts.map +0 -1
  472. package/dist/google-gemini/common/Gemini_JobRunFns.d.ts +0 -25
  473. package/dist/google-gemini/common/Gemini_JobRunFns.d.ts.map +0 -1
  474. package/dist/google-gemini/common/Gemini_ModelSchema.d.ts.map +0 -1
  475. package/dist/google-gemini/index.d.ts.map +0 -1
  476. package/dist/google-gemini/index.js.map +0 -11
  477. package/dist/hf-transformers/HFT_Worker.d.ts +0 -7
  478. package/dist/hf-transformers/HFT_Worker.d.ts.map +0 -1
  479. package/dist/hf-transformers/HuggingFaceTransformersProvider.d.ts.map +0 -1
  480. package/dist/hf-transformers/common/HFT_Constants.d.ts.map +0 -1
  481. package/dist/hf-transformers/common/HFT_JobRunFns.d.ts.map +0 -1
  482. package/dist/hf-transformers/common/HFT_ModelSchema.d.ts.map +0 -1
  483. package/dist/hf-transformers/index.d.ts.map +0 -1
  484. package/dist/hf-transformers/index.js +0 -116
  485. package/dist/hf-transformers/index.js.map +0 -10
  486. package/dist/index-60ev6k93.js.map +0 -12
  487. package/dist/index-6j5pq722.js +0 -11
  488. package/dist/index-6j5pq722.js.map +0 -9
  489. package/dist/index-8651nz8y.js.map +0 -12
  490. package/dist/index-j4g81r4k.js.map +0 -10
  491. package/dist/index-pkd79j8b.js +0 -58
  492. package/dist/index-pkd79j8b.js.map +0 -10
  493. package/dist/index-q2t627d5.js +0 -88
  494. package/dist/index-q2t627d5.js.map +0 -12
  495. package/dist/index-tp5s7355.js +0 -77
  496. package/dist/index-tp5s7355.js.map +0 -12
  497. package/dist/index-v72vr07f.js +0 -81
  498. package/dist/index-v72vr07f.js.map +0 -12
  499. package/dist/index-wr57rwyx.js +0 -104
  500. package/dist/index-wr57rwyx.js.map +0 -12
  501. package/dist/index-zqq3kw0n.js +0 -171
  502. package/dist/index-zqq3kw0n.js.map +0 -11
  503. package/dist/index.browser-6j5pq722.js +0 -11
  504. package/dist/index.browser-6j5pq722.js.map +0 -9
  505. package/dist/index.d.ts +0 -33
  506. package/dist/index.d.ts.map +0 -1
  507. package/dist/index.js +0 -316
  508. package/dist/index.js.map +0 -15
  509. package/dist/provider-hf-inference/HFI_Worker.d.ts +0 -7
  510. package/dist/provider-hf-inference/HFI_Worker.d.ts.map +0 -1
  511. package/dist/provider-llamacpp/LlamaCpp_Worker.d.ts +0 -7
  512. package/dist/provider-llamacpp/LlamaCpp_Worker.d.ts.map +0 -1
  513. package/dist/provider-ollama/Ollama_Worker.browser.d.ts +0 -7
  514. package/dist/provider-ollama/Ollama_Worker.browser.d.ts.map +0 -1
  515. package/dist/provider-ollama/Ollama_Worker.d.ts +0 -7
  516. package/dist/provider-ollama/Ollama_Worker.d.ts.map +0 -1
  517. package/dist/provider-openai/OpenAI_Worker.d.ts +0 -7
  518. package/dist/provider-openai/OpenAI_Worker.d.ts.map +0 -1
  519. package/dist/tf-mediapipe/TFMP_Worker.d.ts +0 -7
  520. package/dist/tf-mediapipe/TFMP_Worker.d.ts.map +0 -1
  521. package/dist/tf-mediapipe/TensorFlowMediaPipeProvider.d.ts +0 -41
  522. package/dist/tf-mediapipe/TensorFlowMediaPipeProvider.d.ts.map +0 -1
  523. package/dist/tf-mediapipe/common/TFMP_Constants.d.ts.map +0 -1
  524. package/dist/tf-mediapipe/common/TFMP_JobRunFns.d.ts.map +0 -1
  525. package/dist/tf-mediapipe/common/TFMP_ModelSchema.d.ts.map +0 -1
  526. package/dist/tf-mediapipe/index.d.ts.map +0 -1
  527. package/dist/tf-mediapipe/index.js.map +0 -14
  528. package/dist/types.d.ts +0 -7
  529. package/dist/types.d.ts.map +0 -1
  530. package/dist/web-browser/WebBrowserProvider.d.ts.map +0 -1
  531. package/dist/web-browser/WebBrowser_Worker.d.ts +0 -7
  532. package/dist/web-browser/WebBrowser_Worker.d.ts.map +0 -1
  533. package/dist/web-browser/common/WebBrowser_Constants.d.ts.map +0 -1
  534. package/dist/web-browser/common/WebBrowser_JobRunFns.d.ts +0 -20
  535. package/dist/web-browser/common/WebBrowser_JobRunFns.d.ts.map +0 -1
  536. package/dist/web-browser/common/WebBrowser_ModelSchema.d.ts.map +0 -1
  537. package/dist/web-browser/index.d.ts.map +0 -1
  538. package/dist/web-browser/index.js.map +0 -14
  539. /package/dist/{anthropic → provider-anthropic}/common/Anthropic_Constants.d.ts +0 -0
  540. /package/dist/{web-browser → provider-chrome}/common/WebBrowser_Constants.d.ts +0 -0
  541. /package/dist/{google-gemini → provider-gemini}/common/Gemini_Constants.d.ts +0 -0
@@ -1,26 +1,58 @@
1
- import {
2
- HTF_CACHE_NAME
3
- } from "./index-zqq3kw0n.js";
4
- import {
5
- __require
6
- } from "./index-6j5pq722.js";
1
+ var __defProp = Object.defineProperty;
2
+ var __returnValue = (v) => v;
3
+ function __exportSetter(name, newValue) {
4
+ this[name] = __returnValue.bind(null, newValue);
5
+ }
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, {
9
+ get: all[name],
10
+ enumerable: true,
11
+ configurable: true,
12
+ set: __exportSetter.bind(all, name)
13
+ });
14
+ };
15
+ var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
16
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
17
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
18
+ }) : x)(function(x) {
19
+ if (typeof require !== "undefined")
20
+ return require.apply(this, arguments);
21
+ throw Error('Dynamic require of "' + x + '" is not supported');
22
+ });
7
23
 
8
- // src/hf-transformers/common/HFT_JobRunFns.ts
9
- import { buildToolDescription, filterValidToolCalls, toTextFlatMessages } from "@workglow/ai";
10
- import { getLogger, parsePartialJson } from "@workglow/util";
11
- var _transformersSdk;
24
+ // src/provider-hf-transformers/common/HFT_Pipeline.ts
25
+ var exports_HFT_Pipeline = {};
26
+ __export(exports_HFT_Pipeline, {
27
+ setHftCacheDir: () => setHftCacheDir,
28
+ removeCachedPipeline: () => removeCachedPipeline,
29
+ loadTransformersSDK: () => loadTransformersSDK,
30
+ hasCachedPipeline: () => hasCachedPipeline,
31
+ getPipelineCacheKey: () => getPipelineCacheKey,
32
+ getPipeline: () => getPipeline,
33
+ clearPipelineCache: () => clearPipelineCache
34
+ });
35
+ import { getLogger } from "@workglow/util/worker";
36
+ function setHftCacheDir(dir) {
37
+ _cacheDir = dir;
38
+ if (_transformersSdk) {
39
+ _transformersSdk.env.cacheDir = dir;
40
+ }
41
+ }
12
42
  async function loadTransformersSDK() {
13
43
  if (!_transformersSdk) {
14
44
  try {
15
45
  _transformersSdk = await import("@huggingface/transformers");
16
46
  _transformersSdk.env.fetch = abortableFetch;
47
+ if (_cacheDir) {
48
+ _transformersSdk.env.cacheDir = _cacheDir;
49
+ }
17
50
  } catch {
18
51
  throw new Error("@huggingface/transformers is required for HuggingFace Transformers tasks. Install it with: bun add @huggingface/transformers");
19
52
  }
20
53
  }
21
54
  return _transformersSdk;
22
55
  }
23
- var modelAbortControllers = new Map;
24
56
  function abortableFetch(url, options) {
25
57
  let signal;
26
58
  try {
@@ -34,17 +66,24 @@ function abortableFetch(url, options) {
34
66
  } catch {}
35
67
  return fetch(url, { ...options, ...signal ? { signal } : {} });
36
68
  }
37
- var pipelines = new Map;
38
- var pipelineLoadPromises = new Map;
39
69
  function clearPipelineCache() {
40
70
  pipelines.clear();
41
71
  }
72
+ function hasCachedPipeline(cacheKey) {
73
+ return pipelines.has(cacheKey);
74
+ }
75
+ function removeCachedPipeline(cacheKey) {
76
+ return pipelines.delete(cacheKey);
77
+ }
78
+ function isBrowserEnv() {
79
+ return typeof globalThis !== "undefined" && typeof globalThis.window !== "undefined";
80
+ }
42
81
  function getPipelineCacheKey(model) {
43
82
  const dtype = model.provider_config.dtype || "q8";
44
83
  const device = model.provider_config.device || "";
45
84
  return `${model.provider_config.model_path}:${model.provider_config.pipeline}:${dtype}:${device}`;
46
85
  }
47
- var getPipeline = async (model, onProgress, options = {}, signal, progressScaleMax = 10) => {
86
+ async function getPipeline(model, onProgress, options = {}, signal, progressScaleMax = 10) {
48
87
  const cacheKey = getPipelineCacheKey(model);
49
88
  if (pipelines.has(cacheKey)) {
50
89
  getLogger().debug("HFT pipeline cache hit", { cacheKey });
@@ -62,13 +101,23 @@ var getPipeline = async (model, onProgress, options = {}, signal, progressScaleM
62
101
  });
63
102
  pipelineLoadPromises.set(cacheKey, loadPromise);
64
103
  return loadPromise;
65
- };
66
- var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKey, signal) => {
104
+ }
105
+ var _transformersSdk, _cacheDir, modelAbortControllers, pipelines, pipelineLoadPromises, doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKey, signal) => {
67
106
  let lastProgressTime = 0;
68
107
  let pendingProgress = null;
69
108
  let throttleTimer = null;
70
109
  const THROTTLE_MS = 160;
71
- const sendProgress = (progress, file, fileProgress) => {
110
+ const buildProgressDetails = (file, fileProgress, filesMap) => {
111
+ const details = {
112
+ file,
113
+ progress: fileProgress
114
+ };
115
+ if (filesMap && Object.keys(filesMap).length > 0) {
116
+ details.files = filesMap;
117
+ }
118
+ return details;
119
+ };
120
+ const sendProgress = (progress, file, fileProgress, filesMap) => {
72
121
  const now = Date.now();
73
122
  const timeSinceLastEvent = now - lastProgressTime;
74
123
  const isFirst = lastProgressTime === 0;
@@ -79,21 +128,19 @@ var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKe
79
128
  throttleTimer = null;
80
129
  }
81
130
  pendingProgress = null;
82
- onProgress(Math.round(progress), "Downloading model", { file, progress: fileProgress });
131
+ onProgress(Math.round(progress), "Downloading model", buildProgressDetails(file, fileProgress, filesMap));
83
132
  lastProgressTime = now;
84
133
  return;
85
134
  }
86
135
  if (timeSinceLastEvent < THROTTLE_MS) {
87
- pendingProgress = { progress, file, fileProgress };
136
+ pendingProgress = { progress, file, fileProgress, filesMap };
88
137
  if (!throttleTimer) {
89
138
  const timeRemaining = Math.max(1, THROTTLE_MS - timeSinceLastEvent);
90
139
  throttleTimer = setTimeout(() => {
91
140
  throttleTimer = null;
92
141
  if (pendingProgress) {
93
- onProgress(Math.round(pendingProgress.progress), "Downloading model", {
94
- file: pendingProgress.file,
95
- progress: pendingProgress.fileProgress
96
- });
142
+ const p = pendingProgress;
143
+ onProgress(Math.round(p.progress), "Downloading model", buildProgressDetails(p.file, p.fileProgress, p.filesMap));
97
144
  lastProgressTime = Date.now();
98
145
  pendingProgress = null;
99
146
  }
@@ -101,7 +148,7 @@ var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKe
101
148
  }
102
149
  return;
103
150
  }
104
- onProgress(Math.round(progress), "Downloading model", { file, progress: fileProgress });
151
+ onProgress(Math.round(progress), "Downloading model", buildProgressDetails(file, fileProgress, filesMap));
105
152
  lastProgressTime = now;
106
153
  pendingProgress = null;
107
154
  };
@@ -141,13 +188,19 @@ var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKe
141
188
  }
142
189
  }
143
190
  }
144
- sendProgress(scaledProgress, activeFile, activeFileProgress);
191
+ sendProgress(scaledProgress, activeFile, activeFileProgress, files);
145
192
  }
146
193
  };
194
+ let device = model.provider_config.device;
195
+ if (!isBrowserEnv()) {
196
+ if (device === "wasm" || device === "webgpu") {
197
+ device = undefined;
198
+ }
199
+ }
147
200
  const pipelineOptions = {
148
201
  dtype: model.provider_config.dtype || "q8",
149
202
  ...model.provider_config.use_external_data_format ? { useExternalDataFormat: model.provider_config.use_external_data_format } : {},
150
- ...model.provider_config.device ? { device: model.provider_config.device } : {},
203
+ ...device ? { device } : {},
151
204
  ...options,
152
205
  progress_callback: progressCallback
153
206
  };
@@ -168,10 +221,7 @@ var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKe
168
221
  }
169
222
  const finalPending = pendingProgress;
170
223
  if (finalPending) {
171
- onProgress(Math.round(finalPending.progress), "Downloading model", {
172
- file: finalPending.file,
173
- progress: finalPending.fileProgress
174
- });
224
+ onProgress(Math.round(finalPending.progress), "Downloading model", buildProgressDetails(finalPending.file, finalPending.fileProgress, finalPending.filesMap));
175
225
  pendingProgress = null;
176
226
  }
177
227
  if (abortSignal?.aborted) {
@@ -191,234 +241,1068 @@ var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKe
191
241
  modelAbortControllers.delete(modelPath);
192
242
  }
193
243
  };
194
- var HFT_Download = async (input, model, onProgress, signal) => {
195
- const logger = getLogger();
196
- const timerLabel = `hft:Download:${model?.provider_config.model_path}`;
197
- logger.time(timerLabel, { model: model?.provider_config.model_path });
198
- await getPipeline(model, onProgress, {}, signal, 100);
199
- logger.timeEnd(timerLabel, { model: model?.provider_config.model_path });
200
- return {
201
- model: input.model
202
- };
244
+ var init_HFT_Pipeline = __esm(() => {
245
+ modelAbortControllers = new Map;
246
+ pipelines = new Map;
247
+ pipelineLoadPromises = new Map;
248
+ });
249
+
250
+ // src/provider-hf-transformers/common/HFT_Constants.ts
251
+ var HF_TRANSFORMERS_ONNX = "HF_TRANSFORMERS_ONNX";
252
+ var HTF_CACHE_NAME = "transformers-cache";
253
+ var QuantizationDataType = {
254
+ auto: "auto",
255
+ fp32: "fp32",
256
+ fp16: "fp16",
257
+ q8: "q8",
258
+ int8: "int8",
259
+ uint8: "uint8",
260
+ q4: "q4",
261
+ bnb4: "bnb4",
262
+ q4f16: "q4f16"
203
263
  };
204
- var HFT_Unload = async (input, model, onProgress, signal) => {
205
- const cacheKey = getPipelineCacheKey(model);
206
- if (pipelines.has(cacheKey)) {
207
- pipelines.delete(cacheKey);
208
- onProgress(50, "Pipeline removed from memory");
209
- }
210
- const model_path = model.provider_config.model_path;
211
- await deleteModelCache(model_path);
212
- onProgress(100, "Model cache deleted");
213
- return {
214
- model: input.model
215
- };
264
+ var TextPipelineUseCase = {
265
+ "fill-mask": "fill-mask",
266
+ "token-classification": "token-classification",
267
+ "text-generation": "text-generation",
268
+ "text2text-generation": "text2text-generation",
269
+ "text-classification": "text-classification",
270
+ summarization: "summarization",
271
+ translation: "translation",
272
+ "feature-extraction": "feature-extraction",
273
+ "zero-shot-classification": "zero-shot-classification",
274
+ "question-answering": "question-answering"
216
275
  };
217
- var deleteModelCache = async (model_path) => {
218
- const cache = await caches.open(HTF_CACHE_NAME);
219
- const keys = await cache.keys();
220
- const prefix = `/${model_path}/`;
221
- const requestsToDelete = [];
222
- for (const request of keys) {
223
- const url = new URL(request.url);
224
- if (url.pathname.startsWith(prefix)) {
225
- requestsToDelete.push(request);
226
- }
227
- }
228
- let deletedCount = 0;
229
- for (const request of requestsToDelete) {
230
- try {
231
- const deleted = await cache.delete(request);
232
- if (deleted) {
233
- deletedCount++;
234
- } else {
235
- const deletedByUrl = await cache.delete(request.url);
236
- if (deletedByUrl) {
237
- deletedCount++;
276
+ var VisionPipelineUseCase = {
277
+ "background-removal": "background-removal",
278
+ "image-segmentation": "image-segmentation",
279
+ "depth-estimation": "depth-estimation",
280
+ "image-classification": "image-classification",
281
+ "image-to-image": "image-to-image",
282
+ "image-to-text": "image-to-text",
283
+ "object-detection": "object-detection",
284
+ "image-feature-extraction": "image-feature-extraction"
285
+ };
286
+ var AudioPipelineUseCase = {
287
+ "audio-classification": "audio-classification",
288
+ "automatic-speech-recognition": "automatic-speech-recognition",
289
+ "text-to-speech": "text-to-speech"
290
+ };
291
+ var MultimodalPipelineUseCase = {
292
+ "document-question-answering": "document-question-answering",
293
+ "image-to-text": "image-to-text",
294
+ "zero-shot-audio-classification": "zero-shot-audio-classification",
295
+ "zero-shot-image-classification": "zero-shot-image-classification",
296
+ "zero-shot-object-detection": "zero-shot-object-detection"
297
+ };
298
+ var PipelineUseCase = {
299
+ ...TextPipelineUseCase,
300
+ ...VisionPipelineUseCase,
301
+ ...AudioPipelineUseCase,
302
+ ...MultimodalPipelineUseCase
303
+ };
304
+ // src/provider-hf-transformers/common/HFT_ModelSchema.ts
305
+ import { ModelConfigSchema, ModelRecordSchema } from "@workglow/ai/worker";
306
+ var HfTransformersOnnxModelSchema = {
307
+ type: "object",
308
+ properties: {
309
+ provider: {
310
+ const: HF_TRANSFORMERS_ONNX,
311
+ description: "Discriminator: ONNX runtime backend."
312
+ },
313
+ provider_config: {
314
+ type: "object",
315
+ description: "ONNX runtime-specific options.",
316
+ properties: {
317
+ pipeline: {
318
+ type: "string",
319
+ enum: Object.values(PipelineUseCase),
320
+ description: "Pipeline type for the ONNX model.",
321
+ default: "text-generation"
322
+ },
323
+ model_path: {
324
+ type: "string",
325
+ description: "Filesystem path or URI for the ONNX model."
326
+ },
327
+ dtype: {
328
+ type: "string",
329
+ enum: Object.values(QuantizationDataType),
330
+ description: "Data type for the ONNX model.",
331
+ default: "auto"
332
+ },
333
+ device: {
334
+ type: "string",
335
+ enum: ["cpu", "gpu", "webgpu", "wasm", "metal"],
336
+ description: "High-level device selection.",
337
+ default: "webgpu"
338
+ },
339
+ execution_providers: {
340
+ type: "array",
341
+ items: { type: "string" },
342
+ description: "Raw ONNX Runtime execution provider identifiers.",
343
+ "x-ui-hidden": true
344
+ },
345
+ intra_op_num_threads: {
346
+ type: "integer",
347
+ minimum: 1
348
+ },
349
+ inter_op_num_threads: {
350
+ type: "integer",
351
+ minimum: 1
352
+ },
353
+ use_external_data_format: {
354
+ type: "boolean",
355
+ description: "Whether the model uses external data format."
356
+ },
357
+ native_dimensions: {
358
+ type: "integer",
359
+ description: "The native dimensions of the model."
360
+ },
361
+ pooling: {
362
+ type: "string",
363
+ enum: ["mean", "last_token", "cls"],
364
+ description: "The pooling strategy to use for the model.",
365
+ default: "mean"
366
+ },
367
+ normalize: {
368
+ type: "boolean",
369
+ description: "Whether the model uses normalization.",
370
+ default: true
371
+ },
372
+ language_style: {
373
+ type: "string",
374
+ description: "The language style of the model."
375
+ },
376
+ mrl: {
377
+ type: "boolean",
378
+ description: "Whether the model uses matryoshka.",
379
+ default: false
238
380
  }
381
+ },
382
+ required: ["model_path", "pipeline"],
383
+ additionalProperties: false,
384
+ if: {
385
+ properties: {
386
+ pipeline: {
387
+ const: "feature-extraction"
388
+ }
389
+ }
390
+ },
391
+ then: {
392
+ required: ["native_dimensions"]
239
393
  }
240
- } catch (error) {
241
- console.error(`Failed to delete cache entry: ${request.url}`, error);
242
394
  }
243
- }
395
+ },
396
+ required: ["provider", "provider_config"],
397
+ additionalProperties: true
244
398
  };
245
- var HFT_TextEmbedding = async (input, model, onProgress, signal) => {
246
- const logger = getLogger();
247
- const uuid = crypto.randomUUID();
248
- const timerLabel = `hft:TextEmbedding:${model?.provider_config.model_path}:${uuid}`;
249
- logger.time(timerLabel, { model: model?.provider_config.model_path });
250
- const generateEmbedding = await getPipeline(model, onProgress, {}, signal);
251
- logger.debug("HFT TextEmbedding: pipeline ready, generating embedding", {
252
- model: model?.provider_config.model_path,
253
- inputLength: Array.isArray(input.text) ? input.text.length : input.text?.length
254
- });
255
- const hfVector = await generateEmbedding(input.text, {
256
- pooling: model?.provider_config.pooling || "mean",
257
- normalize: model?.provider_config.normalize
258
- });
259
- const isArrayInput = Array.isArray(input.text);
260
- const embeddingDim = model?.provider_config.native_dimensions;
261
- if (isArrayInput && hfVector.dims.length > 1) {
262
- const [numTexts, vectorDim] = hfVector.dims;
263
- if (numTexts !== input.text.length) {
264
- throw new Error(`HuggingFace Embedding tensor batch size does not match input array length: ${numTexts} != ${input.text.length}`);
265
- }
266
- if (vectorDim !== embeddingDim) {
267
- throw new Error(`HuggingFace Embedding vector dimension does not match model dimensions: ${vectorDim} != ${embeddingDim}`);
268
- }
269
- const vectors = Array.from({ length: numTexts }, (_, i) => hfVector[i].data.slice());
270
- logger.timeEnd(timerLabel, { batchSize: numTexts, dimensions: vectorDim });
271
- return { vector: vectors };
272
- }
273
- if (hfVector.size !== embeddingDim) {
274
- logger.timeEnd(timerLabel, { status: "error", reason: "dimension mismatch" });
275
- console.warn(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`, input, hfVector);
276
- throw new Error(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`);
277
- }
278
- logger.timeEnd(timerLabel, { dimensions: hfVector.size });
279
- return { vector: hfVector.data };
399
+ var HfTransformersOnnxModelRecordSchema = {
400
+ type: "object",
401
+ properties: {
402
+ ...ModelRecordSchema.properties,
403
+ ...HfTransformersOnnxModelSchema.properties
404
+ },
405
+ required: [...ModelRecordSchema.required, ...HfTransformersOnnxModelSchema.required],
406
+ additionalProperties: false
280
407
  };
281
- var HFT_TextClassification = async (input, model, onProgress, signal) => {
282
- const isArrayInput = Array.isArray(input.text);
283
- if (model?.provider_config?.pipeline === "zero-shot-classification") {
284
- if (!input.candidateLabels || !Array.isArray(input.candidateLabels) || input.candidateLabels.length === 0) {
285
- throw new Error("Zero-shot text classification requires candidate labels");
408
+ var HfTransformersOnnxModelConfigSchema = {
409
+ type: "object",
410
+ properties: {
411
+ ...ModelConfigSchema.properties,
412
+ ...HfTransformersOnnxModelSchema.properties
413
+ },
414
+ required: [...ModelConfigSchema.required, ...HfTransformersOnnxModelSchema.required],
415
+ additionalProperties: false
416
+ };
417
+ // src/provider-hf-transformers/common/HFT_OnnxDtypes.ts
418
+ var ONNX_QUANTIZATION_SUFFIX_MAPPING = {
419
+ fp32: "",
420
+ fp16: "_fp16",
421
+ int8: "_int8",
422
+ uint8: "_uint8",
423
+ q8: "_quantized",
424
+ q4: "_q4",
425
+ q4f16: "_q4f16",
426
+ bnb4: "_bnb4"
427
+ };
428
+ var SUFFIXES_LONGEST_FIRST = Object.entries(ONNX_QUANTIZATION_SUFFIX_MAPPING).filter(([, suffix]) => suffix !== "").sort((a, b) => b[1].length - a[1].length);
429
+ function parseOnnxQuantizations(params) {
430
+ const subfolder = params.subfolder ?? "onnx";
431
+ const prefix = subfolder + "/";
432
+ const stems = [];
433
+ for (const fp of params.filePaths) {
434
+ if (!fp.startsWith(prefix))
435
+ continue;
436
+ if (!fp.endsWith(".onnx"))
437
+ continue;
438
+ if (fp.endsWith(".onnx_data"))
439
+ continue;
440
+ stems.push(fp.slice(prefix.length, -".onnx".length));
441
+ }
442
+ if (stems.length === 0)
443
+ return [];
444
+ const parsed = [];
445
+ for (const stem of stems) {
446
+ let matched = false;
447
+ for (const [dtype, suffix] of SUFFIXES_LONGEST_FIRST) {
448
+ if (stem.endsWith(suffix)) {
449
+ parsed.push({ baseName: stem.slice(0, -suffix.length), dtype });
450
+ matched = true;
451
+ break;
452
+ }
286
453
  }
287
- const zeroShotClassifier = await getPipeline(model, onProgress, {}, signal);
288
- const result2 = await zeroShotClassifier(input.text, input.candidateLabels, {});
289
- if (isArrayInput) {
290
- const results = Array.isArray(result2) && Array.isArray(result2[0]?.labels) ? result2 : [result2];
291
- return {
292
- categories: results.map((r) => r.labels.map((label, idx) => ({
293
- label,
294
- score: r.scores[idx]
295
- })))
296
- };
454
+ if (!matched) {
455
+ parsed.push({ baseName: stem, dtype: "fp32" });
297
456
  }
298
- return {
299
- categories: result2.labels.map((label, idx) => ({
300
- label,
301
- score: result2.scores[idx]
302
- }))
303
- };
304
- }
305
- const TextClassification = await getPipeline(model, onProgress, {}, signal);
306
- const result = await TextClassification(input.text, {
307
- top_k: input.maxCategories || undefined
308
- });
309
- if (isArrayInput) {
310
- return {
311
- categories: result.map((perInput) => {
312
- const items = Array.isArray(perInput) ? perInput : [perInput];
313
- return items.map((category) => ({
314
- label: category.label,
315
- score: category.score
316
- }));
317
- })
318
- };
319
- }
320
- if (Array.isArray(result[0])) {
321
- return {
322
- categories: result[0].map((category) => ({
323
- label: category.label,
324
- score: category.score
325
- }))
326
- };
327
- }
328
- return {
329
- categories: result.map((category) => ({
330
- label: category.label,
331
- score: category.score
332
- }))
333
- };
334
- };
335
- var HFT_TextLanguageDetection = async (input, model, onProgress, signal) => {
336
- const isArrayInput = Array.isArray(input.text);
337
- const TextClassification = await getPipeline(model, onProgress, {}, signal);
338
- const result = await TextClassification(input.text, {
339
- top_k: input.maxLanguages || undefined
340
- });
341
- if (isArrayInput) {
342
- return {
343
- languages: result.map((perInput) => {
344
- const items = Array.isArray(perInput) ? perInput : [perInput];
345
- return items.map((category) => ({
346
- language: category.label,
347
- score: category.score
348
- }));
349
- })
350
- };
351
457
  }
352
- if (Array.isArray(result[0])) {
353
- return {
354
- languages: result[0].map((category) => ({
355
- language: category.label,
356
- score: category.score
357
- }))
358
- };
458
+ const allBaseNames = new Set(parsed.map((p) => p.baseName));
459
+ const byDtype = new Map;
460
+ for (const { baseName, dtype } of parsed) {
461
+ let set = byDtype.get(dtype);
462
+ if (!set) {
463
+ set = new Set;
464
+ byDtype.set(dtype, set);
465
+ }
466
+ set.add(baseName);
359
467
  }
360
- return {
361
- languages: result.map((category) => ({
362
- language: category.label,
363
- score: category.score
364
- }))
365
- };
366
- };
367
- var HFT_TextNamedEntityRecognition = async (input, model, onProgress, signal) => {
368
- const isArrayInput = Array.isArray(input.text);
369
- const textNamedEntityRecognition = await getPipeline(model, onProgress, {}, signal);
370
- const results = await textNamedEntityRecognition(input.text, {
371
- ignore_labels: input.blockList
468
+ const allDtypes = Object.keys(ONNX_QUANTIZATION_SUFFIX_MAPPING);
469
+ return allDtypes.filter((dtype) => {
470
+ const set = byDtype.get(dtype);
471
+ return set !== undefined && set.size === allBaseNames.size;
372
472
  });
373
- if (isArrayInput) {
374
- return {
375
- entities: results.map((perInput) => {
376
- const items = Array.isArray(perInput) ? perInput : [perInput];
377
- return items.map((entity) => ({
378
- entity: entity.entity,
379
- score: entity.score,
380
- word: entity.word
381
- }));
382
- })
383
- };
384
- }
385
- let entities = [];
386
- if (!Array.isArray(results)) {
387
- entities = [results];
388
- } else {
389
- entities = results;
390
- }
391
- return {
392
- entities: entities.map((entity) => ({
393
- entity: entity.entity,
394
- score: entity.score,
395
- word: entity.word
396
- }))
397
- };
398
- };
399
- var HFT_TextFillMask = async (input, model, onProgress, signal) => {
400
- const isArrayInput = Array.isArray(input.text);
401
- const unmasker = await getPipeline(model, onProgress, {}, signal);
402
- const results = await unmasker(input.text);
403
- if (isArrayInput) {
404
- return {
405
- predictions: results.map((perInput) => {
406
- const items = Array.isArray(perInput) ? perInput : [perInput];
407
- return items.map((prediction) => ({
408
- entity: prediction.token_str,
409
- score: prediction.score,
410
- sequence: prediction.sequence
411
- }));
412
- })
413
- };
473
+ }
474
// src/provider-hf-transformers/common/HFT_ToolMarkup.ts
/**
 * Extracts tool calls from model output text.
 *
 * Two formats are recognized, in priority order:
 *   1. `<tool_call>{...}</tool_call>` markup blocks;
 *   2. bare, brace-balanced JSON objects embedded in the text that carry a
 *      `name`/`arguments`, `name`/`parameters`, or `function.name` field.
 *
 * Returns `{ text, toolCalls }` where `text` is the input with the matched
 * tool-call spans removed and trimmed, and each call is `{ id, name, input }`.
 */
function parseToolCallsFromText(responseText) {
  const toolCalls = [];
  let callIndex = 0;
  let cleanedText = responseText;
  // Some models serialize `arguments` as a JSON string rather than an inline
  // object; decode it so callers always receive an object.
  const normalizeArgs = (args) => {
    if (typeof args === "string") {
      try {
        return JSON.parse(args);
      } catch (innerError) {
        console.warn("Failed to parse tool call function.arguments as JSON", innerError);
        return {};
      }
    }
    return args ?? {};
  };
  // Pass 1: explicit <tool_call> tags.
  const toolCallTagRegex = /<tool_call>([\s\S]*?)<\/tool_call>/g;
  let tagMatch;
  while ((tagMatch = toolCallTagRegex.exec(responseText)) !== null) {
    try {
      const parsed = JSON.parse(tagMatch[1].trim());
      toolCalls.push({
        id: `call_${callIndex++}`,
        name: parsed.name ?? parsed.function?.name ?? "",
        // BUGFIX: string-encoded arguments are now decoded here too, matching
        // the bare-JSON path below (previously only that path decoded them).
        input: normalizeArgs(parsed.arguments ?? parsed.function?.arguments ?? parsed.parameters)
      });
    } catch {
      // Malformed JSON inside a tag: skip the call; the tag is still stripped.
    }
  }
  if (toolCalls.length > 0) {
    cleanedText = responseText.replace(/<tool_call>[\s\S]*?<\/tool_call>/g, "").trim();
    return { text: cleanedText, toolCalls };
  }
  // Pass 2: bare JSON objects. Scan for brace-balanced spans (string- and
  // escape-aware) so braces inside string values do not break matching.
  const jsonCandidates = [];
  (function collectBalancedJsonBlocks(source) {
    const length = source.length;
    let i = 0;
    while (i < length) {
      if (source[i] !== "{") {
        i++;
        continue;
      }
      let depth = 1;
      let j = i + 1;
      let inString = false;
      let escape = false;
      while (j < length && depth > 0) {
        const ch = source[j];
        if (inString) {
          if (escape) {
            escape = false;
          } else if (ch === "\\") {
            escape = true;
          } else if (ch === '"') {
            inString = false;
          }
        } else {
          if (ch === '"') {
            inString = true;
          } else if (ch === "{") {
            depth++;
          } else if (ch === "}") {
            depth--;
          }
        }
        j++;
      }
      if (depth === 0) {
        jsonCandidates.push({ text: source.slice(i, j), start: i, end: j });
        i = j;
      } else {
        break; // unbalanced tail — nothing further can close
      }
    }
  })(responseText);
  const matchedRanges = [];
  for (const candidate of jsonCandidates) {
    try {
      const parsed = JSON.parse(candidate.text);
      if (parsed.name && (parsed.arguments !== undefined || parsed.parameters !== undefined)) {
        toolCalls.push({
          id: `call_${callIndex++}`,
          name: parsed.name,
          input: normalizeArgs(parsed.arguments ?? parsed.parameters)
        });
        matchedRanges.push({ start: candidate.start, end: candidate.end });
      } else if (parsed.function?.name) {
        toolCalls.push({
          id: `call_${callIndex++}`,
          name: parsed.function.name,
          input: normalizeArgs(parsed.function.arguments)
        });
        matchedRanges.push({ start: candidate.start, end: candidate.end });
      }
    } catch {
      // Not valid JSON — ignore this candidate.
    }
  }
  if (toolCalls.length > 0) {
    // Splice out every matched span, keeping the surrounding prose.
    let result = "";
    let lastIndex = 0;
    for (const range of matchedRanges) {
      result += responseText.slice(lastIndex, range.start);
      lastIndex = range.end;
    }
    result += responseText.slice(lastIndex);
    cleanedText = result.trim();
  }
  return { text: cleanedText, toolCalls };
}
582
/**
 * Builds a streaming filter that suppresses `<tool_call>...</tool_call>`
 * spans from a token stream, emitting only the surrounding text via `emit`.
 * Handles tags split across arbitrary token boundaries by holding back any
 * trailing partial `<tool_call>` prefix until it can be classified.
 */
function createToolCallMarkupFilter(emit) {
  const OPEN_TAG = "<tool_call>";
  const CLOSE_TAG = "</tool_call>";
  let insideTag = false;
  let carry = "";
  // Length of the longest suffix of `s` that is a strict prefix of OPEN_TAG.
  const danglingPrefixLength = (s) => {
    for (let len = Math.min(s.length, OPEN_TAG.length - 1); len >= 1; len--) {
      if (s.endsWith(OPEN_TAG.slice(0, len))) return len;
    }
    return 0;
  };
  function feed(token) {
    if (insideTag) {
      // Accumulate until the close tag appears; everything inside is dropped.
      carry += token;
      const closeAt = carry.indexOf(CLOSE_TAG);
      if (closeAt === -1) return;
      const rest = carry.slice(closeAt + CLOSE_TAG.length);
      carry = "";
      insideTag = false;
      if (rest.length > 0) feed(rest);
      return;
    }
    const combined = carry + token;
    const openAt = combined.indexOf(OPEN_TAG);
    if (openAt !== -1) {
      if (openAt > 0) emit(combined.slice(0, openAt));
      carry = "";
      insideTag = true;
      const rest = combined.slice(openAt + OPEN_TAG.length);
      if (rest.length > 0) feed(rest);
      return;
    }
    // No full open tag: emit everything except a possible partial tag suffix.
    const hold = danglingPrefixLength(combined);
    if (hold > 0) {
      const safe = combined.slice(0, combined.length - hold);
      if (safe.length > 0) emit(safe);
      carry = combined.slice(combined.length - hold);
    } else {
      if (combined.length > 0) emit(combined);
      carry = "";
    }
  }
  function flush() {
    // Pending text is real output; a pending (unclosed) tag is discarded.
    if (carry.length > 0 && !insideTag) emit(carry);
    carry = "";
    insideTag = false;
  }
  return { feed, flush };
}
646
// src/provider-hf-transformers/common/HFT_InlineLifecycle.ts
/** Drops every cached transformers.js pipeline held by the inline (same-thread) runtime. */
async function clearHftInlinePipelineCache() {
  // Lazy-load the pipeline module (bundler pattern) before clearing its cache.
  const pipelineModule = await Promise.resolve().then(() => (init_HFT_Pipeline(), exports_HFT_Pipeline));
  pipelineModule.clearPipelineCache();
}
651
+
652
// src/common/PipelineTaskMapping.ts
// Maps each workglow task type to the HuggingFace pipeline tags that can serve it.
var TASK_TO_PIPELINES = {
  TextEmbeddingTask: ["feature-extraction"],
  TextGenerationTask: ["text-generation"],
  TextSummaryTask: ["sentence-similarity", "summarization"],
  TextTranslationTask: ["translation"],
  TextClassificationTask: ["text-classification", "zero-shot-classification"],
  TextQuestionAnswerTask: ["question-answering"],
  TextFillMaskTask: ["fill-mask"],
  TextLanguageDetectionTask: ["text-classification"],
  TextNamedEntityRecognitionTask: ["token-classification"],
  TokenClassificationTask: ["token-classification"],
  ImageClassificationTask: ["image-classification", "zero-shot-image-classification"],
  ImageEmbeddingTask: ["image-feature-extraction"],
  ImageSegmentationTask: ["image-segmentation"],
  ImageToImageTask: ["image-to-image"],
  ImageToTextTask: ["image-to-text"],
  ObjectDetectionTask: ["object-detection", "zero-shot-object-detection"],
  DepthEstimationTask: ["depth-estimation"],
  AudioClassificationTask: ["audio-classification"],
  SpeechRecognitionTask: ["automatic-speech-recognition"]
};
/** Inverse lookup: all task types a given pipeline tag can serve, in table order. */
function pipelineToTaskTypes(pipeline) {
  const tasks = [];
  for (const [task, pipelines] of Object.entries(TASK_TO_PIPELINES)) {
    if (pipelines.includes(pipeline)) tasks.push(task);
  }
  return tasks;
}
677
+
678
// src/common/HfModelSearch.ts
var HF_API_BASE = "https://huggingface.co/api";
/** Renders a download count compactly: 2500000 -> "2.5M", 1500 -> "1.5k", 999 -> "999". */
function formatDownloads(n) {
  const MILLION = 1e6;
  const THOUSAND = 1000;
  if (n >= MILLION) {
    return `${(n / MILLION).toFixed(1)}M`;
  }
  return n >= THOUSAND ? `${(n / THOUSAND).toFixed(1)}k` : String(n);
}
687
/**
 * Builds the provider-specific config stanza for a HuggingFace hub entry.
 * ONNX/llama.cpp providers address models by path; everything else by name.
 */
function mapHfProviderConfig(entry, provider) {
  if (provider === "HF_TRANSFORMERS_ONNX") {
    const base = { model_path: entry.id };
    return entry.pipeline_tag ? { ...base, pipeline: entry.pipeline_tag } : base;
  }
  if (provider === "LOCAL_LLAMACPP") {
    return { model_path: entry.id };
  }
  return { model_name: entry.id };
}
700
/**
 * Converts a raw HuggingFace hub entry into the search-result shape used by
 * the model picker: display label/description plus an installable `record`.
 */
function mapHfModelResult(entry, provider) {
  const pill = [entry.pipeline_tag, entry.library_name].filter(Boolean).join(" | ");
  const downloadsLabel = `${formatDownloads(entry.downloads)} downloads`;
  return {
    id: entry.id,
    label: `${entry.id}${pill ? ` ${pill}` : ""}`,
    description: downloadsLabel,
    record: {
      model_id: entry.id,
      provider,
      // Repo name without the org prefix.
      title: entry.id.split("/").pop() ?? entry.id,
      description: [entry.pipeline_tag, downloadsLabel].filter(Boolean).join(" — "),
      tasks: entry.pipeline_tag ? pipelineToTaskTypes(entry.pipeline_tag) : [],
      provider_config: mapHfProviderConfig(entry, provider),
      metadata: {}
    },
    raw: entry
  };
}
718
/**
 * Queries the HuggingFace hub model search API, sorted by downloads
 * (descending, capped at 500 results). `pipeline_tag` is always expanded;
 * callers may request additional `expand[]` fields.
 * @throws Error when the API responds with a non-2xx status.
 */
async function searchHfModels(query, extraParams, expandFields, signal) {
  const params = new URLSearchParams({
    search: query,
    limit: "500",
    sort: "downloads",
    direction: "-1",
    ...extraParams
  });
  const expansions = ["pipeline_tag", ...(expandFields ?? [])];
  for (const field of expansions) {
    params.append("expand[]", field);
  }
  const response = await fetch(`${HF_API_BASE}/models?${params}`, { signal });
  if (!response.ok) {
    throw new Error(`HuggingFace API returned ${response.status}`);
  }
  return response.json();
}
737
+
738
// src/provider-hf-transformers/common/HFT_ModelSearch.ts
/**
 * Searches the HF hub for ONNX models and annotates each result with the
 * quantization variants derivable from its repo file listing (siblings).
 * The bulky `siblings` array is stripped from `raw` before returning.
 */
var HFT_ModelSearch = async (input, _model, _onProgress, signal) => {
  const entries = await searchHfModels(input.query, { filter: "onnx" }, ["siblings"], signal);
  const results = entries.map((entry) => {
    const item = mapHfModelResult(entry, HF_TRANSFORMERS_ONNX);
    const siblingFiles = entry.siblings ?? [];
    if (siblingFiles.length > 0) {
      const filePaths = siblingFiles.map((s) => s.rfilename);
      const quantizations = parseOnnxQuantizations({ filePaths });
      if (quantizations.length > 0) {
        const record = item.record;
        record.provider_config = { ...(record.provider_config ?? {}), quantizations };
      }
    }
    delete item.raw.siblings;
    return item;
  });
  return { results };
};
759
+
760
+ // src/provider-hf-transformers/common/HFT_ImageHelpers.ts
761
+ function imageToBase64(image) {
762
+ return image.toBase64?.() || "";
763
+ }
764
+
765
// src/provider-hf-transformers/common/HFT_BackgroundRemoval.ts
init_HFT_Pipeline();
/** Runs a background-removal pipeline and returns the first output image as base64. */
var HFT_BackgroundRemoval = async (input, model, onProgress, signal) => {
  const remover = await getPipeline(model, onProgress, {}, signal);
  const output = await remover(input.image);
  const first = Array.isArray(output) ? output[0] : output;
  return { image: imageToBase64(first) };
};
775
+
776
// src/provider-hf-transformers/common/HFT_CountTokens.ts
init_HFT_Pipeline();
/** Counts tokens with the model's own tokenizer; batched input yields a count per text. */
var HFT_CountTokens = async (input, model, onProgress, _signal) => {
  const { AutoTokenizer } = await loadTransformersSDK();
  const tokenizer = await AutoTokenizer.from_pretrained(model.provider_config.model_path, {
    progress_callback: (progress) => onProgress(progress?.progress ?? 0)
  });
  if (Array.isArray(input.text)) {
    return { count: input.text.map((t) => tokenizer.encode(t).length) };
  }
  return { count: tokenizer.encode(input.text).length };
};
/** Reactive wrapper: same computation with no progress reporting or external abort. */
var HFT_CountTokens_Reactive = async (input, _output, model) => {
  return HFT_CountTokens(input, model, () => {}, new AbortController().signal);
};
795
+
796
// src/provider-hf-transformers/common/HFT_Download.ts
init_HFT_Pipeline();
import { getLogger as getLogger2 } from "@workglow/util/worker";
/** Pre-downloads a model by warming the pipeline cache; progress saturates at 100. */
var HFT_Download = async (input, model, onProgress, signal) => {
  const logger = getLogger2();
  const timerLabel = `hft:Download:${model?.provider_config.model_path}`;
  const timerMeta = { model: model?.provider_config.model_path };
  logger.time(timerLabel, timerMeta);
  await getPipeline(model, onProgress, {}, signal, 100);
  logger.timeEnd(timerLabel, timerMeta);
  return { model: input.model };
};
809
+
810
// src/provider-hf-transformers/common/HFT_ImageClassification.ts
init_HFT_Pipeline();
/**
 * Image classification. When the model is configured for zero-shot
 * classification, `input.categories` supplies the candidate labels and is
 * required; otherwise a plain classifier runs with an optional top-k limit.
 */
var HFT_ImageClassification = async (input, model, onProgress, signal) => {
  // Normalize single-result / array-result pipeline output to label+score pairs.
  const toCategories = (output) => {
    const list = Array.isArray(output) ? output : [output];
    return list.map((r) => ({ label: r.label, score: r.score }));
  };
  if (model?.provider_config?.pipeline === "zero-shot-image-classification") {
    if (!input.categories || !Array.isArray(input.categories) || input.categories.length === 0) {
      console.warn("Zero-shot image classification requires categories", input);
      throw new Error("Zero-shot image classification requires categories");
    }
    const zeroShotClassifier = await getPipeline(model, onProgress, {}, signal);
    const zsOutput = await zeroShotClassifier(input.image, input.categories, {});
    return { categories: toCategories(zsOutput) };
  }
  const classifier = await getPipeline(model, onProgress, {}, signal);
  const output = await classifier(input.image, { top_k: input.maxCategories });
  return { categories: toCategories(output) };
};
840
+
841
// src/provider-hf-transformers/common/HFT_ImageEmbedding.ts
init_HFT_Pipeline();
import { getLogger as getLogger3 } from "@workglow/util/worker";
/** Produces an image embedding vector from an image-feature-extraction pipeline. */
var HFT_ImageEmbedding = async (input, model, onProgress, signal) => {
  const logger = getLogger3();
  const modelPath = model?.provider_config.model_path;
  const timerLabel = `hft:ImageEmbedding:${modelPath}`;
  logger.time(timerLabel, { model: modelPath });
  const embedder = await getPipeline(model, onProgress, {}, signal);
  logger.debug("HFT ImageEmbedding: pipeline ready, generating embedding", {
    model: modelPath
  });
  const tensor = await embedder(input.image);
  logger.timeEnd(timerLabel, { dimensions: tensor?.data?.length });
  return { vector: tensor.data };
};
858
+
859
// src/provider-hf-transformers/common/HFT_ImageSegmentation.ts
init_HFT_Pipeline();
/** Runs image segmentation, returning one { label, score, mask } per segment. */
var HFT_ImageSegmentation = async (input, model, onProgress, signal) => {
  const segmenter = await getPipeline(model, onProgress, {}, signal);
  const output = await segmenter(input.image, {
    threshold: input.threshold,
    mask_threshold: input.maskThreshold
  });
  const rawMasks = Array.isArray(output) ? output : [output];
  // NOTE(review): mask payloads are emitted as empty objects — presumably
  // mask serialization happens elsewhere; confirm before relying on `mask`.
  const masks = await Promise.all(rawMasks.map(async (m) => ({
    label: m.label || "",
    score: m.score || 0,
    mask: {}
  })));
  return { masks };
};
877
+
878
// src/provider-hf-transformers/common/HFT_ImageToText.ts
init_HFT_Pipeline();
/** Captions an image with an image-to-text pipeline; "" when no caption is produced. */
var HFT_ImageToText = async (input, model, onProgress, signal) => {
  const captioner = await getPipeline(model, onProgress, {}, signal);
  const output = await captioner(input.image, { max_new_tokens: input.maxTokens });
  const caption = Array.isArray(output) ? output[0]?.generated_text : output?.generated_text;
  return { text: caption || "" };
};
890
+
891
// src/provider-hf-transformers/common/HFT_ModelInfo.ts
import { getLogger as getLogger4 } from "@workglow/util/worker";
init_HFT_Pipeline();
/**
 * Reports local availability of a transformers.js model: whether a pipeline
 * is loaded in memory, whether its files are cached on disk, optionally the
 * per-file sizes (`detail` = "files" | "files_with_metadata"), and any
 * quantization variants inferable from the cached file names.
 */
var HFT_ModelInfo = async (input, model) => {
  const logger = getLogger4();
  const { ModelRegistry } = await loadTransformersSDK();
  const timerLabel = `hft:ModelInfo:${model?.provider_config.model_path}`;
  logger.time(timerLabel, { model: model?.provider_config.model_path });
  const detail = input.detail;
  const is_loaded = hasCachedPipeline(getPipelineCacheKey(model));
  const { pipeline: pipelineType, model_path, dtype } = model.provider_config;
  const dtypeOptions = dtype ? { dtype } : {};
  const cacheStatus = await ModelRegistry.is_pipeline_cached_files(pipelineType, model_path, { ...dtypeOptions });
  logger.debug("is_pipeline_cached", {
    input: [pipelineType, model_path, { ...dtypeOptions }],
    result: cacheStatus
  });
  // A loaded pipeline implies its files were fetched, even if disk cache says otherwise.
  const is_cached = is_loaded || cacheStatus.allCached;
  let file_sizes = null;
  if (detail === "files" && cacheStatus.files.length > 0) {
    // Names only: sizes are reported as 0 placeholders.
    const sizes = {};
    for (const { file } of cacheStatus.files) {
      sizes[file] = 0;
    }
    file_sizes = sizes;
  } else if (detail === "files_with_metadata" && cacheStatus.files.length > 0) {
    const sizes = {};
    await Promise.all(cacheStatus.files.map(async ({ file }) => {
      const metadata = await ModelRegistry.get_file_metadata(model_path, file);
      if (metadata.exists && metadata.size !== undefined) {
        sizes[file] = metadata.size;
      }
    }));
    if (Object.keys(sizes).length > 0) {
      file_sizes = sizes;
    }
  }
  let quantizations;
  if (cacheStatus.files.length > 0) {
    const parsed = parseOnnxQuantizations({ filePaths: cacheStatus.files.map((f) => f.file) });
    if (parsed.length > 0) {
      quantizations = parsed;
    }
  }
  logger.timeEnd(timerLabel, { model: model?.provider_config.model_path });
  return {
    model: input.model,
    is_local: true,
    is_remote: false,
    supports_browser: true,
    supports_node: true,
    is_cached,
    is_loaded,
    file_sizes,
    ...(quantizations ? { quantizations } : {})
  };
};
956
+
957
// src/provider-hf-transformers/common/HFT_ObjectDetection.ts
init_HFT_Pipeline();
/**
 * Object detection. Zero-shot models require `input.labels`; plain detectors
 * run with an optional confidence `threshold`.
 */
var HFT_ObjectDetection = async (input, model, onProgress, signal) => {
  // Normalize single-result / array-result pipeline output.
  const toDetections = (output) => {
    const list = Array.isArray(output) ? output : [output];
    return list.map((d) => ({ label: d.label, score: d.score, box: d.box }));
  };
  if (model?.provider_config?.pipeline === "zero-shot-object-detection") {
    if (!input.labels || !Array.isArray(input.labels) || input.labels.length === 0) {
      throw new Error("Zero-shot object detection requires labels");
    }
    const zeroShotDetector = await getPipeline(model, onProgress, {}, signal);
    const zsOutput = await zeroShotDetector(input.image, Array.from(input.labels), {
      threshold: input.threshold
    });
    return { detections: toDetections(zsOutput) };
  }
  const detector = await getPipeline(model, onProgress, {}, signal);
  const output = await detector(input.image, { threshold: input.threshold });
  return { detections: toDetections(output) };
};
990
+
991
+ // src/provider-hf-transformers/common/HFT_StructuredGeneration.ts
992
+ init_HFT_Pipeline();
993
+ import { parsePartialJson } from "@workglow/util/worker";
994
+
995
// src/provider-hf-transformers/common/HFT_Streaming.ts
/**
 * Single-consumer async event queue bridging push-style callbacks to an async
 * iterable. Buffered events are drained before completion is reported; a
 * recorded error makes subsequent `next()` calls reject.
 */
function createStreamEventQueue() {
  const backlog = [];
  let waiter = null;
  let closed = false;
  let failure = null;
  // Resolve the single parked consumer, if any.
  const wake = (result) => {
    const resolveWaiter = waiter;
    waiter = null;
    resolveWaiter(result);
  };
  const push = (event) => {
    if (waiter) {
      wake({ value: event, done: false });
    } else {
      backlog.push(event);
    }
  };
  const done = () => {
    closed = true;
    if (waiter) {
      wake({ value: undefined, done: true });
    }
  };
  const error = (e) => {
    // A parked consumer is released with done; the error surfaces on the next call.
    failure = e;
    if (waiter) {
      wake({ value: undefined, done: true });
    }
  };
  const iterable = {
    [Symbol.asyncIterator]() {
      return {
        next() {
          if (failure) {
            return Promise.reject(failure);
          }
          if (backlog.length > 0) {
            return Promise.resolve({ value: backlog.shift(), done: false });
          }
          if (closed) {
            return Promise.resolve({ value: undefined, done: true });
          }
          return new Promise((r) => {
            waiter = r;
          });
        }
      };
    }
  };
  return { push, done, error, iterable };
}
1047
+ function createStreamingTextStreamer(tokenizer, queue, transformers) {
1048
+ const { TextStreamer } = transformers;
1049
+ return new TextStreamer(tokenizer, {
1050
+ skip_prompt: true,
1051
+ decode_kwargs: { skip_special_tokens: true },
1052
+ callback_function: (text) => {
1053
+ queue.push({ type: "text-delta", port: "text", textDelta: text });
1054
+ }
1055
+ });
1056
+ }
1057
+ function createTextStreamer(tokenizer, updateProgress, transformers) {
1058
+ const { TextStreamer } = transformers;
1059
+ let count = 0;
1060
+ return new TextStreamer(tokenizer, {
1061
+ skip_prompt: true,
1062
+ decode_kwargs: { skip_special_tokens: true },
1063
+ callback_function: (text) => {
1064
+ count++;
1065
+ const result = 100 * (1 - Math.exp(-0.05 * count));
1066
+ const progress = Math.round(Math.min(result, 100));
1067
+ updateProgress(progress, "Generating", { text, progress });
1068
+ }
1069
+ });
1070
+ }
1071
+
1072
// src/provider-hf-transformers/common/HFT_TextOutput.ts
/**
 * Pulls plain text out of a transformers.js `generated_text` value, which may
 * be null/undefined, a plain string, or a chat-message array whose last
 * message carries either string content or a parts array with a text part.
 */
function extractGeneratedText(generatedText) {
  if (generatedText == null) {
    return "";
  }
  if (typeof generatedText === "string") {
    return generatedText;
  }
  // Chat-style output: only the final (assistant) message matters.
  const lastMessage = generatedText[generatedText.length - 1];
  if (!lastMessage) {
    return "";
  }
  const { content } = lastMessage;
  if (typeof content === "string") {
    return content;
  }
  for (const part of content) {
    if (part.type === "text" && "text" in part) {
      return part.text;
    }
  }
  return "";
}
1091
+
1092
// src/provider-hf-transformers/common/HFT_StructuredGeneration.ts
/** Appends JSON-schema-only response instructions to the user's prompt. */
function buildStructuredGenerationPrompt(input) {
  const schemaStr = JSON.stringify(input.outputSchema, null, 2);
  const instructions =
    "You MUST respond with ONLY a valid JSON object conforming to this JSON schema:\n" +
    `${schemaStr}\n\n` +
    "Output ONLY the JSON object, no other text.";
  return `${input.prompt}\n\n${instructions}`;
}
1102
/**
 * Best-effort JSON extraction: parse the whole text, else the first
 * `{...}` span, else a partial-JSON parse of that span, else `{}`.
 */
function extractJsonFromText(text) {
  try {
    return JSON.parse(text);
  } catch {
    // Not pure JSON — fall through to span extraction below.
  }
  const match = text.match(/\{[\s\S]*\}/);
  if (!match) {
    return {};
  }
  try {
    return JSON.parse(match[0]);
  } catch {
    // Possibly truncated output — salvage what we can.
    return parsePartialJson(match[0]) ?? {};
  }
}
1117
/**
 * Structured generation via prompting: injects the JSON schema into the
 * prompt, generates text with progress streaming, then extracts the JSON
 * object from the response.
 */
var HFT_StructuredGeneration = async (input, model, onProgress, signal) => {
  const generateText = await getPipeline(model, onProgress, {}, signal);
  const sdk = await loadTransformersSDK();
  const messages = [{ role: "user", content: buildStructuredGenerationPrompt(input) }];
  const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
    tokenize: false,
    add_generation_prompt: true
  });
  const streamer = createTextStreamer(generateText.tokenizer, onProgress, sdk);
  const rawResults = await generateText(formattedPrompt, {
    max_new_tokens: input.maxTokens ?? 1024,
    temperature: input.temperature ?? undefined,
    return_full_text: false,
    streamer
  });
  const resultList = Array.isArray(rawResults) ? rawResults : [rawResults];
  const responseText = extractGeneratedText(resultList[0]?.generated_text).trim();
  return { object: extractJsonFromText(responseText) };
};
1140
/**
 * Streaming variant of structured generation. Text deltas are rewritten into
 * object deltas whenever the accumulated response contains a parseable
 * (possibly partial) JSON object; a final `finish` event carries the
 * fully-extracted object.
 */
var HFT_StructuredGeneration_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {}, signal);
  const sdk = await loadTransformersSDK();
  const messages = [{ role: "user", content: buildStructuredGenerationPrompt(input) }];
  const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
    tokenize: false,
    add_generation_prompt: true
  });
  const queue = createStreamEventQueue();
  const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, sdk);
  let fullText = "";
  // Intercept text-delta pushes to upgrade them into object-delta events.
  const forward = queue.push;
  queue.push = (event) => {
    if (event.type === "text-delta" && "textDelta" in event) {
      fullText += event.textDelta;
      const jsonStart = fullText.match(/\{[\s\S]*/);
      if (jsonStart) {
        const partial = parsePartialJson(jsonStart[0]);
        if (partial !== undefined) {
          forward({ type: "object-delta", port: "object", objectDelta: partial });
          return;
        }
      }
    }
    forward(event);
  };
  const pipelinePromise = generateText(formattedPrompt, {
    max_new_tokens: input.maxTokens ?? 1024,
    temperature: input.temperature ?? undefined,
    return_full_text: false,
    streamer
  }).then(() => queue.done(), (err) => queue.error(err));
  yield* queue.iterable;
  await pipelinePromise;
  yield { type: "finish", data: { object: extractJsonFromText(fullText) } };
};
1183
+
1184
// src/provider-hf-transformers/common/HFT_TextClassification.ts
init_HFT_Pipeline();
/**
 * Text classification. Zero-shot models require `input.candidateLabels` and
 * return label/score pairs from the zero-shot output shape; plain classifiers
 * run with an optional top-k limit. Batched input yields one category list
 * per text.
 */
var HFT_TextClassification = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.text);
  const toCategories = (items) => items.map((category) => ({ label: category.label, score: category.score }));
  if (model?.provider_config?.pipeline === "zero-shot-classification") {
    if (!input.candidateLabels || !Array.isArray(input.candidateLabels) || input.candidateLabels.length === 0) {
      throw new Error("Zero-shot text classification requires candidate labels");
    }
    const zeroShotClassifier = await getPipeline(model, onProgress, {}, signal);
    const zsResult = await zeroShotClassifier(input.text, input.candidateLabels, {});
    // Zero-shot output pairs parallel `labels`/`scores` arrays.
    const pairUp = (r) => r.labels.map((label, idx) => ({ label, score: r.scores[idx] }));
    if (isArrayInput) {
      const perInput = Array.isArray(zsResult) && Array.isArray(zsResult[0]?.labels) ? zsResult : [zsResult];
      return { categories: perInput.map(pairUp) };
    }
    return { categories: pairUp(zsResult) };
  }
  const classify = await getPipeline(model, onProgress, {}, signal);
  const result = await classify(input.text, { top_k: input.maxCategories || undefined });
  if (isArrayInput) {
    return {
      categories: result.map((perInput) => toCategories(Array.isArray(perInput) ? perInput : [perInput]))
    };
  }
  // Single input may still come back nested one level deep.
  if (Array.isArray(result[0])) {
    return { categories: toCategories(result[0]) };
  }
  return { categories: toCategories(result) };
};
1240
+
1241
// src/provider-hf-transformers/common/HFT_TextEmbedding.ts
init_HFT_Pipeline();
import { getLogger as getLogger5 } from "@workglow/util/worker";
/**
 * Text embedding with dimension validation against the model's declared
 * `native_dimensions`. Batched input returns one vector per text (copied out
 * of the batched tensor); mismatched dimensions throw.
 */
var HFT_TextEmbedding = async (input, model, onProgress, signal) => {
  const logger = getLogger5();
  const modelPath = model?.provider_config.model_path;
  // Unique label so concurrent embedding timers don't collide.
  const timerLabel = `hft:TextEmbedding:${modelPath}:${crypto.randomUUID()}`;
  logger.time(timerLabel, { model: modelPath });
  const generateEmbedding = await getPipeline(model, onProgress, {}, signal);
  logger.debug("HFT TextEmbedding: pipeline ready, generating embedding", {
    model: modelPath,
    inputLength: Array.isArray(input.text) ? input.text.length : input.text?.length
  });
  const hfVector = await generateEmbedding(input.text, {
    pooling: model?.provider_config.pooling || "mean",
    normalize: model?.provider_config.normalize
  });
  const embeddingDim = model?.provider_config.native_dimensions;
  if (Array.isArray(input.text) && hfVector.dims.length > 1) {
    const [numTexts, vectorDim] = hfVector.dims;
    if (numTexts !== input.text.length) {
      throw new Error(`HuggingFace Embedding tensor batch size does not match input array length: ${numTexts} != ${input.text.length}`);
    }
    if (vectorDim !== embeddingDim) {
      throw new Error(`HuggingFace Embedding vector dimension does not match model dimensions: ${vectorDim} != ${embeddingDim}`);
    }
    // Copy each row out of the batched tensor.
    const vectors = Array.from({ length: numTexts }, (_, i) => hfVector[i].data.slice());
    logger.timeEnd(timerLabel, { batchSize: numTexts, dimensions: vectorDim });
    return { vector: vectors };
  }
  if (hfVector.size !== embeddingDim) {
    logger.timeEnd(timerLabel, { status: "error", reason: "dimension mismatch" });
    console.warn(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`, input, hfVector);
    throw new Error(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`);
  }
  logger.timeEnd(timerLabel, { dimensions: hfVector.size });
  return { vector: hfVector.data };
};
1280
+
1281
// src/provider-hf-transformers/common/HFT_TextFillMask.ts
init_HFT_Pipeline();
/**
 * Fill-mask inference. Batched input yields one prediction list per text;
 * each prediction exposes the predicted token, its score, and the completed
 * sequence.
 */
var HFT_TextFillMask = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.text);
  const unmasker = await getPipeline(model, onProgress, {}, signal);
  const results = await unmasker(input.text);
  const toPredictions = (items) => items.map((prediction) => ({
    entity: prediction.token_str,
    score: prediction.score,
    sequence: prediction.sequence
  }));
  if (isArrayInput) {
    return {
      predictions: results.map((perInput) => toPredictions(Array.isArray(perInput) ? perInput : [perInput]))
    };
  }
  return { predictions: toPredictions(Array.isArray(results) ? results : [results]) };
};
1313
+
1314
+ // src/provider-hf-transformers/common/HFT_TextGeneration.ts
1315
+ init_HFT_Pipeline();
1316
+ import { getLogger as getLogger6 } from "@workglow/util/worker";
429
1317
  var HFT_TextGeneration = async (input, model, onProgress, signal) => {
430
- const logger = getLogger();
1318
+ const logger = getLogger6();
431
1319
  const timerLabel = `hft:TextGeneration:${model?.provider_config.model_path}`;
432
1320
  logger.time(timerLabel, { model: model?.provider_config.model_path });
433
1321
  const isArrayInput = Array.isArray(input.prompt);
434
1322
  const generateText = await getPipeline(model, onProgress, {}, signal);
1323
+ const sdk = await loadTransformersSDK();
435
1324
  logger.debug("HFT TextGeneration: pipeline ready, generating text", {
436
1325
  model: model?.provider_config.model_path,
437
1326
  promptLength: isArrayInput ? input.prompt.length : input.prompt?.length
438
1327
  });
439
- const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress);
1328
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress, sdk);
440
1329
  let results = await generateText(input.prompt, {
441
1330
  ...streamer ? { streamer } : {}
442
1331
  });
@@ -452,439 +1341,141 @@ var HFT_TextGeneration = async (input, model, onProgress, signal) => {
452
1341
  if (!Array.isArray(results)) {
453
1342
  results = [results];
454
1343
  }
455
- const text = extractGeneratedText(results[0]?.generated_text);
456
- logger.timeEnd(timerLabel, { outputLength: text?.length });
457
- return {
458
- text
459
- };
460
- };
461
- var HFT_TextTranslation = async (input, model, onProgress, signal) => {
462
- const isArrayInput = Array.isArray(input.text);
463
- const translate = await getPipeline(model, onProgress, {}, signal);
464
- const streamer = isArrayInput ? undefined : createTextStreamer(translate.tokenizer, onProgress);
465
- const result = await translate(input.text, {
466
- src_lang: input.source_lang,
467
- tgt_lang: input.target_lang,
468
- ...streamer ? { streamer } : {}
469
- });
470
- if (isArrayInput) {
471
- const batchResults = Array.isArray(result) ? result : [result];
472
- return {
473
- text: batchResults.map((r) => r?.translation_text || ""),
474
- target_lang: input.target_lang
475
- };
476
- }
477
- const translatedText = Array.isArray(result) ? result[0]?.translation_text || "" : result?.translation_text || "";
478
- return {
479
- text: translatedText,
480
- target_lang: input.target_lang
481
- };
482
- };
483
- var HFT_TextRewriter = async (input, model, onProgress, signal) => {
484
- const isArrayInput = Array.isArray(input.text);
485
- const generateText = await getPipeline(model, onProgress, {}, signal);
486
- const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress);
487
- if (isArrayInput) {
488
- const texts = input.text;
489
- const promptedTexts = texts.map((t) => (input.prompt ? input.prompt + `
490
- ` : "") + t);
491
- let results2 = await generateText(promptedTexts, {});
492
- const batchResults = Array.isArray(results2) ? results2 : [results2];
493
- const outputTexts = batchResults.map((r, i) => {
494
- const seqs = Array.isArray(r) ? r : [r];
495
- const text2 = extractGeneratedText(seqs[0]?.generated_text);
496
- if (text2 === promptedTexts[i]) {
497
- throw new Error("Rewriter failed to generate new text");
498
- }
499
- return text2;
500
- });
501
- return { text: outputTexts };
502
- }
503
- const promptedText = (input.prompt ? input.prompt + `
504
- ` : "") + input.text;
505
- let results = await generateText(promptedText, {
506
- ...streamer ? { streamer } : {}
507
- });
508
- if (!Array.isArray(results)) {
509
- results = [results];
510
- }
511
- const text = extractGeneratedText(results[0]?.generated_text);
512
- if (text === promptedText) {
513
- throw new Error("Rewriter failed to generate new text");
514
- }
515
- return {
516
- text
517
- };
518
- };
519
- var HFT_TextSummary = async (input, model, onProgress, signal) => {
520
- const isArrayInput = Array.isArray(input.text);
521
- const generateSummary = await getPipeline(model, onProgress, {}, signal);
522
- const streamer = isArrayInput ? undefined : createTextStreamer(generateSummary.tokenizer, onProgress);
523
- const result = await generateSummary(input.text, {
524
- ...streamer ? { streamer } : {}
525
- });
526
- if (isArrayInput) {
527
- const batchResults = Array.isArray(result) ? result : [result];
528
- return {
529
- text: batchResults.map((r) => r?.summary_text || "")
530
- };
531
- }
532
- let summaryText = "";
533
- if (Array.isArray(result)) {
534
- summaryText = result[0]?.summary_text || "";
535
- } else {
536
- summaryText = result?.summary_text || "";
537
- }
538
- return {
539
- text: summaryText
540
- };
541
- };
542
- var HFT_TextQuestionAnswer = async (input, model, onProgress, signal) => {
543
- const isArrayInput = Array.isArray(input.question);
544
- const generateAnswer = await getPipeline(model, onProgress, {}, signal);
545
- if (isArrayInput) {
546
- const questions = input.question;
547
- const contexts = input.context;
548
- if (questions.length !== contexts.length) {
549
- throw new Error(`question[] and context[] must have the same length: ${questions.length} != ${contexts.length}`);
550
- }
551
- const answers = [];
552
- for (let i = 0;i < questions.length; i++) {
553
- const result2 = await generateAnswer(questions[i], contexts[i], {});
554
- let answerText2 = "";
555
- if (Array.isArray(result2)) {
556
- answerText2 = result2[0]?.answer || "";
557
- } else {
558
- answerText2 = result2?.answer || "";
559
- }
560
- answers.push(answerText2);
561
- }
562
- return { text: answers };
563
- }
564
- const streamer = createTextStreamer(generateAnswer.tokenizer, onProgress);
565
- const result = await generateAnswer(input.question, input.context, {
566
- streamer
567
- });
568
- let answerText = "";
569
- if (Array.isArray(result)) {
570
- answerText = result[0]?.answer || "";
571
- } else {
572
- answerText = result?.answer || "";
573
- }
574
- return {
575
- text: answerText
576
- };
577
- };
578
- var HFT_ImageSegmentation = async (input, model, onProgress, signal) => {
579
- const segmenter = await getPipeline(model, onProgress, {}, signal);
580
- const result = await segmenter(input.image, {
581
- threshold: input.threshold,
582
- mask_threshold: input.maskThreshold
583
- });
584
- const masks = Array.isArray(result) ? result : [result];
585
- const processedMasks = await Promise.all(masks.map(async (mask) => ({
586
- label: mask.label || "",
587
- score: mask.score || 0,
588
- mask: {}
589
- })));
590
- return {
591
- masks: processedMasks
592
- };
593
- };
594
- var HFT_ImageToText = async (input, model, onProgress, signal) => {
595
- const captioner = await getPipeline(model, onProgress, {}, signal);
596
- const result = await captioner(input.image, {
597
- max_new_tokens: input.maxTokens
598
- });
599
- const text = Array.isArray(result) ? result[0]?.generated_text : result?.generated_text;
1344
+ const text = extractGeneratedText(results[0]?.generated_text);
1345
+ logger.timeEnd(timerLabel, { outputLength: text?.length });
600
1346
  return {
601
- text: text || ""
1347
+ text
602
1348
  };
603
1349
  };
604
- var HFT_BackgroundRemoval = async (input, model, onProgress, signal) => {
605
- const remover = await getPipeline(model, onProgress, {}, signal);
606
- const result = await remover(input.image);
607
- const resultImage = Array.isArray(result) ? result[0] : result;
608
- return {
609
- image: imageToBase64(resultImage)
610
- };
1350
+ var HFT_TextGeneration_Stream = async function* (input, model, signal) {
1351
+ const noopProgress = () => {};
1352
+ const generateText = await getPipeline(model, noopProgress, {}, signal);
1353
+ const sdk = await loadTransformersSDK();
1354
+ const queue = createStreamEventQueue();
1355
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, sdk);
1356
+ const pipelinePromise = generateText(input.prompt, {
1357
+ streamer
1358
+ }).then(() => queue.done(), (err) => queue.error(err));
1359
+ yield* queue.iterable;
1360
+ await pipelinePromise;
1361
+ yield { type: "finish", data: {} };
611
1362
  };
612
- var HFT_ImageEmbedding = async (input, model, onProgress, signal) => {
613
- const logger = getLogger();
614
- const timerLabel = `hft:ImageEmbedding:${model?.provider_config.model_path}`;
615
- logger.time(timerLabel, { model: model?.provider_config.model_path });
616
- const embedder = await getPipeline(model, onProgress, {}, signal);
617
- logger.debug("HFT ImageEmbedding: pipeline ready, generating embedding", {
618
- model: model?.provider_config.model_path
1363
+
1364
+ // src/provider-hf-transformers/common/HFT_TextLanguageDetection.ts
1365
+ init_HFT_Pipeline();
1366
+ var HFT_TextLanguageDetection = async (input, model, onProgress, signal) => {
1367
+ const isArrayInput = Array.isArray(input.text);
1368
+ const TextClassification = await getPipeline(model, onProgress, {}, signal);
1369
+ const result = await TextClassification(input.text, {
1370
+ top_k: input.maxLanguages || undefined
619
1371
  });
620
- const result = await embedder(input.image);
621
- logger.timeEnd(timerLabel, { dimensions: result?.data?.length });
622
- return {
623
- vector: result.data
624
- };
625
- };
626
- var HFT_ImageClassification = async (input, model, onProgress, signal) => {
627
- if (model?.provider_config?.pipeline === "zero-shot-image-classification") {
628
- if (!input.categories || !Array.isArray(input.categories) || input.categories.length === 0) {
629
- console.warn("Zero-shot image classification requires categories", input);
630
- throw new Error("Zero-shot image classification requires categories");
631
- }
632
- const zeroShotClassifier = await getPipeline(model, onProgress, {}, signal);
633
- const result2 = await zeroShotClassifier(input.image, input.categories, {});
634
- const results2 = Array.isArray(result2) ? result2 : [result2];
1372
+ if (isArrayInput) {
635
1373
  return {
636
- categories: results2.map((r) => ({
637
- label: r.label,
638
- score: r.score
639
- }))
1374
+ languages: result.map((perInput) => {
1375
+ const items = Array.isArray(perInput) ? perInput : [perInput];
1376
+ return items.map((category) => ({
1377
+ language: category.label,
1378
+ score: category.score
1379
+ }));
1380
+ })
640
1381
  };
641
1382
  }
642
- const classifier = await getPipeline(model, onProgress, {}, signal);
643
- const result = await classifier(input.image, {
644
- top_k: input.maxCategories
645
- });
646
- const results = Array.isArray(result) ? result : [result];
647
- return {
648
- categories: results.map((r) => ({
649
- label: r.label,
650
- score: r.score
651
- }))
652
- };
653
- };
654
- var HFT_ObjectDetection = async (input, model, onProgress, signal) => {
655
- if (model?.provider_config?.pipeline === "zero-shot-object-detection") {
656
- if (!input.labels || !Array.isArray(input.labels) || input.labels.length === 0) {
657
- throw new Error("Zero-shot object detection requires labels");
658
- }
659
- const zeroShotDetector = await getPipeline(model, onProgress, {}, signal);
660
- const result2 = await zeroShotDetector(input.image, Array.from(input.labels), {
661
- threshold: input.threshold
662
- });
663
- const detections2 = Array.isArray(result2) ? result2 : [result2];
1383
+ if (Array.isArray(result[0])) {
664
1384
  return {
665
- detections: detections2.map((d) => ({
666
- label: d.label,
667
- score: d.score,
668
- box: d.box
1385
+ languages: result[0].map((category) => ({
1386
+ language: category.label,
1387
+ score: category.score
669
1388
  }))
670
1389
  };
671
1390
  }
672
- const detector = await getPipeline(model, onProgress, {}, signal);
673
- const result = await detector(input.image, {
674
- threshold: input.threshold
675
- });
676
- const detections = Array.isArray(result) ? result : [result];
677
1391
  return {
678
- detections: detections.map((d) => ({
679
- label: d.label,
680
- score: d.score,
681
- box: d.box
1392
+ languages: result.map((category) => ({
1393
+ language: category.label,
1394
+ score: category.score
682
1395
  }))
683
1396
  };
684
1397
  };
685
- function imageToBase64(image) {
686
- return image.toBase64?.() || "";
687
- }
688
- function createTextStreamer(tokenizer, updateProgress) {
689
- const { TextStreamer } = _transformersSdk;
690
- let count = 0;
691
- return new TextStreamer(tokenizer, {
692
- skip_prompt: true,
693
- decode_kwargs: { skip_special_tokens: true },
694
- callback_function: (text) => {
695
- count++;
696
- const result = 100 * (1 - Math.exp(-0.05 * count));
697
- const progress = Math.round(Math.min(result, 100));
698
- updateProgress(progress, "Generating", { text, progress });
699
- }
1398
+
1399
+ // src/provider-hf-transformers/common/HFT_TextNamedEntityRecognition.ts
1400
+ init_HFT_Pipeline();
1401
+ var HFT_TextNamedEntityRecognition = async (input, model, onProgress, signal) => {
1402
+ const isArrayInput = Array.isArray(input.text);
1403
+ const textNamedEntityRecognition = await getPipeline(model, onProgress, {}, signal);
1404
+ const results = await textNamedEntityRecognition(input.text, {
1405
+ ignore_labels: input.blockList
700
1406
  });
701
- }
702
- function extractGeneratedText(generatedText) {
703
- if (generatedText == null)
704
- return "";
705
- if (typeof generatedText === "string")
706
- return generatedText;
707
- const lastMessage = generatedText[generatedText.length - 1];
708
- if (!lastMessage)
709
- return "";
710
- const content = lastMessage.content;
711
- if (typeof content === "string")
712
- return content;
713
- for (const part of content) {
714
- if (part.type === "text" && "text" in part) {
715
- return part.text;
716
- }
1407
+ if (isArrayInput) {
1408
+ return {
1409
+ entities: results.map((perInput) => {
1410
+ const items = Array.isArray(perInput) ? perInput : [perInput];
1411
+ return items.map((entity) => ({
1412
+ entity: entity.entity,
1413
+ score: entity.score,
1414
+ word: entity.word
1415
+ }));
1416
+ })
1417
+ };
717
1418
  }
718
- return "";
719
- }
720
- function createStreamEventQueue() {
721
- const buffer = [];
722
- let resolve = null;
723
- let finished = false;
724
- let err = null;
725
- const push = (event) => {
726
- if (resolve) {
727
- const r = resolve;
728
- resolve = null;
729
- r({ value: event, done: false });
730
- } else {
731
- buffer.push(event);
732
- }
733
- };
734
- const done = () => {
735
- finished = true;
736
- if (resolve) {
737
- const r = resolve;
738
- resolve = null;
739
- r({ value: undefined, done: true });
740
- }
741
- };
742
- const error = (e) => {
743
- err = e;
744
- if (resolve) {
745
- const r = resolve;
746
- resolve = null;
747
- r({ value: undefined, done: true });
748
- }
749
- };
750
- const iterable = {
751
- [Symbol.asyncIterator]() {
752
- return {
753
- next() {
754
- if (err)
755
- return Promise.reject(err);
756
- if (buffer.length > 0) {
757
- return Promise.resolve({ value: buffer.shift(), done: false });
758
- }
759
- if (finished) {
760
- return Promise.resolve({ value: undefined, done: true });
761
- }
762
- return new Promise((r) => {
763
- resolve = r;
764
- });
765
- }
766
- };
767
- }
768
- };
769
- return { push, done, error, iterable };
770
- }
771
- function createStreamingTextStreamer(tokenizer, queue) {
772
- const { TextStreamer } = _transformersSdk;
773
- return new TextStreamer(tokenizer, {
774
- skip_prompt: true,
775
- decode_kwargs: { skip_special_tokens: true },
776
- callback_function: (text) => {
777
- queue.push({ type: "text-delta", port: "text", textDelta: text });
778
- }
779
- });
780
- }
781
- function createToolCallMarkupFilter(emit) {
782
- const OPEN_TAG = "<tool_call>";
783
- const CLOSE_TAG = "</tool_call>";
784
- let state = "text";
785
- let pending = "";
786
- function feed(token) {
787
- if (state === "tag") {
788
- pending += token;
789
- const closeIdx = pending.indexOf(CLOSE_TAG);
790
- if (closeIdx !== -1) {
791
- const afterClose = pending.slice(closeIdx + CLOSE_TAG.length);
792
- pending = "";
793
- state = "text";
794
- if (afterClose.length > 0) {
795
- feed(afterClose);
796
- }
797
- }
798
- return;
799
- }
800
- const combined = pending + token;
801
- const openIdx = combined.indexOf(OPEN_TAG);
802
- if (openIdx !== -1) {
803
- const before = combined.slice(0, openIdx);
804
- if (before.length > 0) {
805
- emit(before);
806
- }
807
- pending = "";
808
- state = "tag";
809
- const afterOpen = combined.slice(openIdx + OPEN_TAG.length);
810
- if (afterOpen.length > 0) {
811
- feed(afterOpen);
812
- }
813
- return;
814
- }
815
- let prefixLen = 0;
816
- for (let len = Math.min(combined.length, OPEN_TAG.length - 1);len >= 1; len--) {
817
- if (combined.endsWith(OPEN_TAG.slice(0, len))) {
818
- prefixLen = len;
819
- break;
820
- }
1419
+ let entities = [];
1420
+ if (!Array.isArray(results)) {
1421
+ entities = [results];
1422
+ } else {
1423
+ entities = results;
1424
+ }
1425
+ return {
1426
+ entities: entities.map((entity) => ({
1427
+ entity: entity.entity,
1428
+ score: entity.score,
1429
+ word: entity.word
1430
+ }))
1431
+ };
1432
+ };
1433
+
1434
+ // src/provider-hf-transformers/common/HFT_TextQuestionAnswer.ts
1435
+ init_HFT_Pipeline();
1436
+ var HFT_TextQuestionAnswer = async (input, model, onProgress, signal) => {
1437
+ const isArrayInput = Array.isArray(input.question);
1438
+ const generateAnswer = await getPipeline(model, onProgress, {}, signal);
1439
+ if (isArrayInput) {
1440
+ const questions = input.question;
1441
+ const contexts = input.context;
1442
+ if (questions.length !== contexts.length) {
1443
+ throw new Error(`question[] and context[] must have the same length: ${questions.length} != ${contexts.length}`);
821
1444
  }
822
- if (prefixLen > 0) {
823
- const safe = combined.slice(0, combined.length - prefixLen);
824
- if (safe.length > 0) {
825
- emit(safe);
826
- }
827
- pending = combined.slice(combined.length - prefixLen);
828
- } else {
829
- if (combined.length > 0) {
830
- emit(combined);
1445
+ const answers = [];
1446
+ for (let i = 0;i < questions.length; i++) {
1447
+ const result2 = await generateAnswer(questions[i], contexts[i], {});
1448
+ let answerText2 = "";
1449
+ if (Array.isArray(result2)) {
1450
+ answerText2 = result2[0]?.answer || "";
1451
+ } else {
1452
+ answerText2 = result2?.answer || "";
831
1453
  }
832
- pending = "";
833
- }
834
- }
835
- function flush() {
836
- if (pending.length > 0 && state === "text") {
837
- emit(pending);
838
- pending = "";
1454
+ answers.push(answerText2);
839
1455
  }
840
- pending = "";
841
- state = "text";
1456
+ return { text: answers };
842
1457
  }
843
- return { feed, flush };
844
- }
845
- var HFT_TextGeneration_Stream = async function* (input, model, signal) {
846
- const noopProgress = () => {};
847
- const generateText = await getPipeline(model, noopProgress, {}, signal);
848
- const queue = createStreamEventQueue();
849
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue);
850
- const pipelinePromise = generateText(input.prompt, {
851
- streamer
852
- }).then(() => queue.done(), (err) => queue.error(err));
853
- yield* queue.iterable;
854
- await pipelinePromise;
855
- yield { type: "finish", data: {} };
856
- };
857
- var HFT_TextRewriter_Stream = async function* (input, model, signal) {
858
- const noopProgress = () => {};
859
- const generateText = await getPipeline(model, noopProgress, {}, signal);
860
- const queue = createStreamEventQueue();
861
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue);
862
- const promptedText = (input.prompt ? input.prompt + `
863
- ` : "") + input.text;
864
- const pipelinePromise = generateText(promptedText, {
865
- streamer
866
- }).then(() => queue.done(), (err) => queue.error(err));
867
- yield* queue.iterable;
868
- await pipelinePromise;
869
- yield { type: "finish", data: {} };
870
- };
871
- var HFT_TextSummary_Stream = async function* (input, model, signal) {
872
- const noopProgress = () => {};
873
- const generateSummary = await getPipeline(model, noopProgress, {}, signal);
874
- const queue = createStreamEventQueue();
875
- const streamer = createStreamingTextStreamer(generateSummary.tokenizer, queue);
876
- const pipelinePromise = generateSummary(input.text, {
1458
+ const sdk = await loadTransformersSDK();
1459
+ const streamer = createTextStreamer(generateAnswer.tokenizer, onProgress, sdk);
1460
+ const result = await generateAnswer(input.question, input.context, {
877
1461
  streamer
878
- }).then(() => queue.done(), (err) => queue.error(err));
879
- yield* queue.iterable;
880
- await pipelinePromise;
881
- yield { type: "finish", data: {} };
1462
+ });
1463
+ let answerText = "";
1464
+ if (Array.isArray(result)) {
1465
+ answerText = result[0]?.answer || "";
1466
+ } else {
1467
+ answerText = result?.answer || "";
1468
+ }
1469
+ return {
1470
+ text: answerText
1471
+ };
882
1472
  };
883
1473
  var HFT_TextQuestionAnswer_Stream = async function* (input, model, signal) {
884
1474
  const noopProgress = () => {};
885
1475
  const generateAnswer = await getPipeline(model, noopProgress, {}, signal);
1476
+ const sdk = await loadTransformersSDK();
886
1477
  const queue = createStreamEventQueue();
887
- const streamer = createStreamingTextStreamer(generateAnswer.tokenizer, queue);
1478
+ const streamer = createStreamingTextStreamer(generateAnswer.tokenizer, queue, sdk);
888
1479
  let pipelineResult;
889
1480
  const pipelinePromise = generateAnswer(input.question, input.context, {
890
1481
  streamer
@@ -904,153 +1495,155 @@ var HFT_TextQuestionAnswer_Stream = async function* (input, model, signal) {
904
1495
  }
905
1496
  yield { type: "finish", data: { text: answerText } };
906
1497
  };
907
- var HFT_TextTranslation_Stream = async function* (input, model, signal) {
908
- const noopProgress = () => {};
909
- const translate = await getPipeline(model, noopProgress, {}, signal);
910
- const queue = createStreamEventQueue();
911
- const streamer = createStreamingTextStreamer(translate.tokenizer, queue);
912
- const pipelinePromise = translate(input.text, {
913
- src_lang: input.source_lang,
914
- tgt_lang: input.target_lang,
915
- streamer
916
- }).then(() => queue.done(), (err) => queue.error(err));
917
- yield* queue.iterable;
918
- await pipelinePromise;
919
- yield { type: "finish", data: { target_lang: input.target_lang } };
920
- };
921
- var HFT_CountTokens = async (input, model, onProgress, signal) => {
1498
+
1499
+ // src/provider-hf-transformers/common/HFT_TextRewriter.ts
1500
+ init_HFT_Pipeline();
1501
+ var HFT_TextRewriter = async (input, model, onProgress, signal) => {
922
1502
  const isArrayInput = Array.isArray(input.text);
923
- const { AutoTokenizer } = _transformersSdk;
924
- const tokenizer = await AutoTokenizer.from_pretrained(model.provider_config.model_path, {
925
- progress_callback: (progress) => onProgress(progress?.progress ?? 0)
926
- });
1503
+ const generateText = await getPipeline(model, onProgress, {}, signal);
1504
+ const sdk = await loadTransformersSDK();
1505
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress, sdk);
927
1506
  if (isArrayInput) {
928
1507
  const texts = input.text;
929
- const counts = texts.map((t) => tokenizer.encode(t).length);
930
- return { count: counts };
1508
+ const promptedTexts = texts.map((t) => (input.prompt ? input.prompt + `
1509
+ ` : "") + t);
1510
+ let results2 = await generateText(promptedTexts, {});
1511
+ const batchResults = Array.isArray(results2) ? results2 : [results2];
1512
+ const outputTexts = batchResults.map((r, i) => {
1513
+ const seqs = Array.isArray(r) ? r : [r];
1514
+ const text2 = extractGeneratedText(seqs[0]?.generated_text);
1515
+ if (text2 === promptedTexts[i]) {
1516
+ throw new Error("Rewriter failed to generate new text");
1517
+ }
1518
+ return text2;
1519
+ });
1520
+ return { text: outputTexts };
931
1521
  }
932
- const tokenIds = tokenizer.encode(input.text);
933
- return { count: tokenIds.length };
934
- };
935
- var HFT_CountTokens_Reactive = async (input, _output, model) => {
936
- return HFT_CountTokens(input, model, () => {}, new AbortController().signal);
937
- };
938
- function mapHFTTools(tools) {
939
- return tools.map((t) => ({
940
- type: "function",
941
- function: {
942
- name: t.name,
943
- description: buildToolDescription(t),
944
- parameters: t.inputSchema
945
- }
946
- }));
947
- }
948
- function parseToolCallsFromText(responseText) {
949
- const toolCalls = [];
950
- let callIndex = 0;
951
- let cleanedText = responseText;
952
- const toolCallTagRegex = /<tool_call>([\s\S]*?)<\/tool_call>/g;
953
- let tagMatch;
954
- while ((tagMatch = toolCallTagRegex.exec(responseText)) !== null) {
955
- try {
956
- const parsed = JSON.parse(tagMatch[1].trim());
957
- const id = `call_${callIndex++}`;
958
- toolCalls.push({
959
- id,
960
- name: parsed.name ?? parsed.function?.name ?? "",
961
- input: parsed.arguments ?? parsed.function?.arguments ?? parsed.parameters ?? {}
962
- });
963
- } catch {}
1522
+ const promptedText = (input.prompt ? input.prompt + `
1523
+ ` : "") + input.text;
1524
+ let results = await generateText(promptedText, {
1525
+ ...streamer ? { streamer } : {}
1526
+ });
1527
+ if (!Array.isArray(results)) {
1528
+ results = [results];
964
1529
  }
965
- if (toolCalls.length > 0) {
966
- cleanedText = responseText.replace(/<tool_call>[\s\S]*?<\/tool_call>/g, "").trim();
967
- return { text: cleanedText, toolCalls };
1530
+ const text = extractGeneratedText(results[0]?.generated_text);
1531
+ if (text === promptedText) {
1532
+ throw new Error("Rewriter failed to generate new text");
968
1533
  }
969
- const jsonCandidates = [];
970
- (function collectBalancedJsonBlocks(source) {
971
- const length = source.length;
972
- let i = 0;
973
- while (i < length) {
974
- if (source[i] !== "{") {
975
- i++;
976
- continue;
977
- }
978
- let depth = 1;
979
- let j = i + 1;
980
- let inString = false;
981
- let escape = false;
982
- while (j < length && depth > 0) {
983
- const ch = source[j];
984
- if (inString) {
985
- if (escape) {
986
- escape = false;
987
- } else if (ch === "\\") {
988
- escape = true;
989
- } else if (ch === '"') {
990
- inString = false;
991
- }
992
- } else {
993
- if (ch === '"') {
994
- inString = true;
995
- } else if (ch === "{") {
996
- depth++;
997
- } else if (ch === "}") {
998
- depth--;
999
- }
1000
- }
1001
- j++;
1002
- }
1003
- if (depth === 0) {
1004
- jsonCandidates.push({ text: source.slice(i, j), start: i, end: j });
1005
- i = j;
1006
- } else {
1007
- break;
1008
- }
1009
- }
1010
- })(responseText);
1011
- const matchedRanges = [];
1012
- for (const candidate of jsonCandidates) {
1013
- try {
1014
- const parsed = JSON.parse(candidate.text);
1015
- if (parsed.name && (parsed.arguments !== undefined || parsed.parameters !== undefined)) {
1016
- const id = `call_${callIndex++}`;
1017
- toolCalls.push({
1018
- id,
1019
- name: parsed.name,
1020
- input: parsed.arguments ?? parsed.parameters ?? {}
1021
- });
1022
- matchedRanges.push({ start: candidate.start, end: candidate.end });
1023
- } else if (parsed.function?.name) {
1024
- let functionArgs = parsed.function.arguments ?? {};
1025
- if (typeof functionArgs === "string") {
1026
- try {
1027
- functionArgs = JSON.parse(functionArgs);
1028
- } catch (innerError) {
1029
- console.warn("Failed to parse tool call function.arguments as JSON", innerError);
1030
- functionArgs = {};
1031
- }
1032
- }
1033
- const id = `call_${callIndex++}`;
1034
- toolCalls.push({
1035
- id,
1036
- name: parsed.function.name,
1037
- input: functionArgs ?? {}
1038
- });
1039
- matchedRanges.push({ start: candidate.start, end: candidate.end });
1040
- }
1041
- } catch {}
1534
+ return {
1535
+ text
1536
+ };
1537
+ };
1538
+ var HFT_TextRewriter_Stream = async function* (input, model, signal) {
1539
+ const noopProgress = () => {};
1540
+ const generateText = await getPipeline(model, noopProgress, {}, signal);
1541
+ const sdk = await loadTransformersSDK();
1542
+ const queue = createStreamEventQueue();
1543
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, sdk);
1544
+ const promptedText = (input.prompt ? input.prompt + `
1545
+ ` : "") + input.text;
1546
+ const pipelinePromise = generateText(promptedText, {
1547
+ streamer
1548
+ }).then(() => queue.done(), (err) => queue.error(err));
1549
+ yield* queue.iterable;
1550
+ await pipelinePromise;
1551
+ yield { type: "finish", data: {} };
1552
+ };
1553
+
1554
+ // src/provider-hf-transformers/common/HFT_TextSummary.ts
1555
+ init_HFT_Pipeline();
1556
+ var HFT_TextSummary = async (input, model, onProgress, signal) => {
1557
+ const isArrayInput = Array.isArray(input.text);
1558
+ const generateSummary = await getPipeline(model, onProgress, {}, signal);
1559
+ const sdk = await loadTransformersSDK();
1560
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateSummary.tokenizer, onProgress, sdk);
1561
+ const result = await generateSummary(input.text, {
1562
+ ...streamer ? { streamer } : {}
1563
+ });
1564
+ if (isArrayInput) {
1565
+ const batchResults = Array.isArray(result) ? result : [result];
1566
+ return {
1567
+ text: batchResults.map((r) => r?.summary_text || "")
1568
+ };
1042
1569
  }
1043
- if (toolCalls.length > 0) {
1044
- let result = "";
1045
- let lastIndex = 0;
1046
- for (const range of matchedRanges) {
1047
- result += responseText.slice(lastIndex, range.start);
1048
- lastIndex = range.end;
1049
- }
1050
- result += responseText.slice(lastIndex);
1051
- cleanedText = result.trim();
1570
+ let summaryText = "";
1571
+ if (Array.isArray(result)) {
1572
+ summaryText = result[0]?.summary_text || "";
1573
+ } else {
1574
+ summaryText = result?.summary_text || "";
1052
1575
  }
1053
- return { text: cleanedText, toolCalls };
1576
+ return {
1577
+ text: summaryText
1578
+ };
1579
+ };
1580
+ var HFT_TextSummary_Stream = async function* (input, model, signal) {
1581
+ const noopProgress = () => {};
1582
+ const generateSummary = await getPipeline(model, noopProgress, {}, signal);
1583
+ const sdk = await loadTransformersSDK();
1584
+ const queue = createStreamEventQueue();
1585
+ const streamer = createStreamingTextStreamer(generateSummary.tokenizer, queue, sdk);
1586
+ const pipelinePromise = generateSummary(input.text, {
1587
+ streamer
1588
+ }).then(() => queue.done(), (err) => queue.error(err));
1589
+ yield* queue.iterable;
1590
+ await pipelinePromise;
1591
+ yield { type: "finish", data: {} };
1592
+ };
1593
+
1594
+ // src/provider-hf-transformers/common/HFT_TextTranslation.ts
1595
+ init_HFT_Pipeline();
1596
+ var HFT_TextTranslation = async (input, model, onProgress, signal) => {
1597
+ const isArrayInput = Array.isArray(input.text);
1598
+ const translate = await getPipeline(model, onProgress, {}, signal);
1599
+ const sdk = await loadTransformersSDK();
1600
+ const streamer = isArrayInput ? undefined : createTextStreamer(translate.tokenizer, onProgress, sdk);
1601
+ const result = await translate(input.text, {
1602
+ src_lang: input.source_lang,
1603
+ tgt_lang: input.target_lang,
1604
+ ...streamer ? { streamer } : {}
1605
+ });
1606
+ if (isArrayInput) {
1607
+ const batchResults = Array.isArray(result) ? result : [result];
1608
+ return {
1609
+ text: batchResults.map((r) => r?.translation_text || ""),
1610
+ target_lang: input.target_lang
1611
+ };
1612
+ }
1613
+ const translatedText = Array.isArray(result) ? result[0]?.translation_text || "" : result?.translation_text || "";
1614
+ return {
1615
+ text: translatedText,
1616
+ target_lang: input.target_lang
1617
+ };
1618
+ };
1619
+ var HFT_TextTranslation_Stream = async function* (input, model, signal) {
1620
+ const noopProgress = () => {};
1621
+ const translate = await getPipeline(model, noopProgress, {}, signal);
1622
+ const sdk = await loadTransformersSDK();
1623
+ const queue = createStreamEventQueue();
1624
+ const streamer = createStreamingTextStreamer(translate.tokenizer, queue, sdk);
1625
+ const pipelinePromise = translate(input.text, {
1626
+ src_lang: input.source_lang,
1627
+ tgt_lang: input.target_lang,
1628
+ streamer
1629
+ }).then(() => queue.done(), (err) => queue.error(err));
1630
+ yield* queue.iterable;
1631
+ await pipelinePromise;
1632
+ yield { type: "finish", data: { target_lang: input.target_lang } };
1633
+ };
1634
+
1635
+ // src/provider-hf-transformers/common/HFT_ToolCalling.ts
1636
+ init_HFT_Pipeline();
1637
+ import { buildToolDescription, filterValidToolCalls, toTextFlatMessages } from "@workglow/ai/worker";
1638
+ function mapHFTTools(tools) {
1639
+ return tools.map((t) => ({
1640
+ type: "function",
1641
+ function: {
1642
+ name: t.name,
1643
+ description: buildToolDescription(t),
1644
+ parameters: t.inputSchema
1645
+ }
1646
+ }));
1054
1647
  }
1055
1648
  function resolveHFTToolsAndMessages(input, messages) {
1056
1649
  if (input.toolChoice === "none") {
@@ -1077,6 +1670,7 @@ ${requiredInstruction}` };
1077
1670
  var HFT_ToolCalling = async (input, model, onProgress, signal) => {
1078
1671
  const isArrayInput = Array.isArray(input.prompt);
1079
1672
  const generateText = await getPipeline(model, onProgress, {}, signal);
1673
+ const sdk = await loadTransformersSDK();
1080
1674
  if (isArrayInput) {
1081
1675
  const prompts = input.prompt;
1082
1676
  const texts = [];
@@ -1090,7 +1684,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
1090
1684
  tokenize: false,
1091
1685
  add_generation_prompt: true
1092
1686
  });
1093
- const streamer2 = createTextStreamer(generateText.tokenizer, onProgress);
1687
+ const streamer2 = createTextStreamer(generateText.tokenizer, onProgress, sdk);
1094
1688
  let results2 = await generateText(prompt2, {
1095
1689
  max_new_tokens: input.maxTokens ?? 1024,
1096
1690
  temperature: input.temperature ?? undefined,
@@ -1114,7 +1708,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
1114
1708
  tokenize: false,
1115
1709
  add_generation_prompt: true
1116
1710
  });
1117
- const streamer = createTextStreamer(generateText.tokenizer, onProgress);
1711
+ const streamer = createTextStreamer(generateText.tokenizer, onProgress, sdk);
1118
1712
  let results = await generateText(prompt, {
1119
1713
  max_new_tokens: input.maxTokens ?? 1024,
1120
1714
  temperature: input.temperature ?? undefined,
@@ -1134,6 +1728,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
1134
1728
  var HFT_ToolCalling_Stream = async function* (input, model, signal) {
1135
1729
  const noopProgress = () => {};
1136
1730
  const generateText = await getPipeline(model, noopProgress, {}, signal);
1731
+ const sdk = await loadTransformersSDK();
1137
1732
  const messages = toTextFlatMessages(input);
1138
1733
  const tools = resolveHFTToolsAndMessages(input, messages);
1139
1734
  const prompt = generateText.tokenizer.apply_chat_template(messages, {
@@ -1143,7 +1738,7 @@ var HFT_ToolCalling_Stream = async function* (input, model, signal) {
1143
1738
  });
1144
1739
  const innerQueue = createStreamEventQueue();
1145
1740
  const outerQueue = createStreamEventQueue();
1146
- const streamer = createStreamingTextStreamer(generateText.tokenizer, innerQueue);
1741
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, innerQueue, sdk);
1147
1742
  let fullText = "";
1148
1743
  const filter = createToolCallMarkupFilter((text) => {
1149
1744
  outerQueue.push({ type: "text-delta", port: "text", textDelta: text });
@@ -1188,147 +1783,61 @@ var HFT_ToolCalling_Stream = async function* (input, model, signal) {
1188
1783
  data: { text: cleanedText, toolCalls: validToolCalls }
1189
1784
  };
1190
1785
  };
1191
- var HFT_ModelInfo = async (input, model) => {
1192
- const logger = getLogger();
1193
- const { ModelRegistry } = await loadTransformersSDK();
1194
- const timerLabel = `hft:ModelInfo:${model?.provider_config.model_path}`;
1195
- logger.time(timerLabel, { model: model?.provider_config.model_path });
1196
- const detail = input.detail;
1197
- const is_loaded = pipelines.has(getPipelineCacheKey(model));
1198
- const { pipeline: pipelineType, model_path, dtype, device } = model.provider_config;
1199
- const cacheStatus = await ModelRegistry.is_pipeline_cached_files(pipelineType, model_path, {
1200
- ...dtype ? { dtype } : {}
1201
- });
1202
- logger.debug("is_pipeline_cached", {
1203
- input: [
1204
- pipelineType,
1205
- model_path,
1206
- {
1207
- ...dtype ? { dtype } : {}
1208
- }
1209
- ],
1210
- result: cacheStatus
1211
- });
1212
- const is_cached = is_loaded || cacheStatus.allCached;
1213
- let file_sizes = null;
1214
- if (detail === "files" && cacheStatus.files.length > 0) {
1215
- const sizes = {};
1216
- for (const { file } of cacheStatus.files) {
1217
- sizes[file] = 0;
1218
- }
1219
- file_sizes = sizes;
1220
- } else if (detail === "files_with_metadata" && cacheStatus.files.length > 0) {
1221
- const sizes = {};
1222
- await Promise.all(cacheStatus.files.map(async ({ file }) => {
1223
- const metadata = await ModelRegistry.get_file_metadata(model_path, file);
1224
- if (metadata.exists && metadata.size !== undefined) {
1225
- sizes[file] = metadata.size;
1226
- }
1227
- }));
1228
- if (Object.keys(sizes).length > 0) {
1229
- file_sizes = sizes;
1230
- }
1231
- }
1232
- logger.timeEnd(timerLabel, { model: model?.provider_config.model_path });
1233
- return {
1234
- model: input.model,
1235
- is_local: true,
1236
- is_remote: false,
1237
- supports_browser: true,
1238
- supports_node: true,
1239
- is_cached,
1240
- is_loaded,
1241
- file_sizes
1242
- };
1243
- };
1244
- function buildStructuredGenerationPrompt(input) {
1245
- const schemaStr = JSON.stringify(input.outputSchema, null, 2);
1246
- return `${input.prompt}
1247
1786
 
1248
- You MUST respond with ONLY a valid JSON object conforming to this JSON schema:
1249
- ${schemaStr}
1250
-
1251
- Output ONLY the JSON object, no other text.`;
1787
+ // src/provider-hf-transformers/common/HFT_Unload.ts
1788
+ init_HFT_Pipeline();
1789
+ function hasBrowserCacheStorage() {
1790
+ return typeof globalThis !== "undefined" && "caches" in globalThis && typeof globalThis.caches?.open === "function";
1252
1791
  }
1253
- function extractJsonFromText(text) {
1254
- try {
1255
- return JSON.parse(text);
1256
- } catch {
1257
- const match = text.match(/\{[\s\S]*\}/);
1258
- if (match) {
1259
- try {
1260
- return JSON.parse(match[0]);
1261
- } catch {
1262
- return parsePartialJson(match[0]) ?? {};
1792
+ async function deleteModelCacheFromBrowser(model_path) {
1793
+ const cachesApi = globalThis.caches;
1794
+ const cache = await cachesApi.open(HTF_CACHE_NAME);
1795
+ const keys = await cache.keys();
1796
+ const prefix = `/${model_path}/`;
1797
+ const requestsToDelete = [];
1798
+ for (const request of keys) {
1799
+ const url = new URL(request.url);
1800
+ if (url.pathname.startsWith(prefix)) {
1801
+ requestsToDelete.push(request);
1802
+ }
1803
+ }
1804
+ for (const request of requestsToDelete) {
1805
+ try {
1806
+ const deleted = await cache.delete(request);
1807
+ if (!deleted) {
1808
+ const deletedByUrl = await cache.delete(request.url);
1809
+ if (!deletedByUrl) {}
1263
1810
  }
1811
+ } catch (error) {
1812
+ console.error(`Failed to delete cache entry: ${request.url}`, error);
1264
1813
  }
1265
- return {};
1266
1814
  }
1267
1815
  }
1268
- var HFT_StructuredGeneration = async (input, model, onProgress, signal) => {
1269
- const generateText = await getPipeline(model, onProgress, {}, signal);
1270
- const prompt = buildStructuredGenerationPrompt(input);
1271
- const messages = [{ role: "user", content: prompt }];
1272
- const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
1273
- tokenize: false,
1274
- add_generation_prompt: true
1275
- });
1276
- const streamer = createTextStreamer(generateText.tokenizer, onProgress);
1277
- let results = await generateText(formattedPrompt, {
1278
- max_new_tokens: input.maxTokens ?? 1024,
1279
- temperature: input.temperature ?? undefined,
1280
- return_full_text: false,
1281
- streamer
1816
+ async function deleteModelCacheFromFilesystem(model) {
1817
+ const { ModelRegistry } = await loadTransformersSDK();
1818
+ const { pipeline: pipelineType, model_path, dtype } = model.provider_config;
1819
+ await ModelRegistry.clear_pipeline_cache(pipelineType, model_path, {
1820
+ ...dtype ? { dtype } : {}
1282
1821
  });
1283
- if (!Array.isArray(results)) {
1284
- results = [results];
1822
+ }
1823
+ var HFT_Unload = async (input, model, onProgress, _signal) => {
1824
+ const cacheKey = getPipelineCacheKey(model);
1825
+ if (removeCachedPipeline(cacheKey)) {
1826
+ onProgress(50, "Pipeline removed from memory");
1285
1827
  }
1286
- const responseText = extractGeneratedText(results[0]?.generated_text).trim();
1287
- const object = extractJsonFromText(responseText);
1288
- return { object };
1289
- };
1290
- var HFT_StructuredGeneration_Stream = async function* (input, model, signal) {
1291
- const noopProgress = () => {};
1292
- const generateText = await getPipeline(model, noopProgress, {}, signal);
1293
- const prompt = buildStructuredGenerationPrompt(input);
1294
- const messages = [{ role: "user", content: prompt }];
1295
- const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
1296
- tokenize: false,
1297
- add_generation_prompt: true
1298
- });
1299
- const queue = createStreamEventQueue();
1300
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue);
1301
- let fullText = "";
1302
- const originalPush = queue.push;
1303
- queue.push = (event) => {
1304
- if (event.type === "text-delta" && "textDelta" in event) {
1305
- fullText += event.textDelta;
1306
- const match = fullText.match(/\{[\s\S]*/);
1307
- if (match) {
1308
- const partial = parsePartialJson(match[0]);
1309
- if (partial !== undefined) {
1310
- originalPush({
1311
- type: "object-delta",
1312
- port: "object",
1313
- objectDelta: partial
1314
- });
1315
- return;
1316
- }
1317
- }
1318
- }
1319
- originalPush(event);
1828
+ const model_path = model.provider_config.model_path;
1829
+ if (hasBrowserCacheStorage()) {
1830
+ await deleteModelCacheFromBrowser(model_path);
1831
+ } else {
1832
+ await deleteModelCacheFromFilesystem(model);
1833
+ }
1834
+ onProgress(100, "Model cache deleted");
1835
+ return {
1836
+ model: input.model
1320
1837
  };
1321
- const pipelinePromise = generateText(formattedPrompt, {
1322
- max_new_tokens: input.maxTokens ?? 1024,
1323
- temperature: input.temperature ?? undefined,
1324
- return_full_text: false,
1325
- streamer
1326
- }).then(() => queue.done(), (err) => queue.error(err));
1327
- yield* queue.iterable;
1328
- await pipelinePromise;
1329
- const object = extractJsonFromText(fullText);
1330
- yield { type: "finish", data: { object } };
1331
1838
  };
1839
+
1840
+ // src/provider-hf-transformers/common/HFT_JobRunFns.ts
1332
1841
  var HFT_TASKS = {
1333
1842
  DownloadModelTask: HFT_Download,
1334
1843
  UnloadModelTask: HFT_Unload,
@@ -1351,7 +1860,8 @@ var HFT_TASKS = {
1351
1860
  ImageClassificationTask: HFT_ImageClassification,
1352
1861
  ObjectDetectionTask: HFT_ObjectDetection,
1353
1862
  ToolCallingTask: HFT_ToolCalling,
1354
- StructuredGenerationTask: HFT_StructuredGeneration
1863
+ StructuredGenerationTask: HFT_StructuredGeneration,
1864
+ ModelSearchTask: HFT_ModelSearch
1355
1865
  };
1356
1866
  var HFT_STREAM_TASKS = {
1357
1867
  TextGenerationTask: HFT_TextGeneration_Stream,
@@ -1366,6 +1876,130 @@ var HFT_REACTIVE_TASKS = {
1366
1876
  CountTokensTask: HFT_CountTokens_Reactive
1367
1877
  };
1368
1878
 
1369
- export { clearPipelineCache, HFT_Download, HFT_Unload, HFT_TextEmbedding, HFT_TextClassification, HFT_TextLanguageDetection, HFT_TextNamedEntityRecognition, HFT_TextFillMask, HFT_TextGeneration, HFT_TextTranslation, HFT_TextRewriter, HFT_TextSummary, HFT_TextQuestionAnswer, HFT_ImageSegmentation, HFT_ImageToText, HFT_BackgroundRemoval, HFT_ImageEmbedding, HFT_ImageClassification, HFT_ObjectDetection, createToolCallMarkupFilter, HFT_TextGeneration_Stream, HFT_TextRewriter_Stream, HFT_TextSummary_Stream, HFT_TextQuestionAnswer_Stream, HFT_TextTranslation_Stream, HFT_CountTokens, HFT_CountTokens_Reactive, parseToolCallsFromText, HFT_ToolCalling, HFT_ToolCalling_Stream, HFT_ModelInfo, HFT_StructuredGeneration, HFT_StructuredGeneration_Stream, HFT_TASKS, HFT_STREAM_TASKS, HFT_REACTIVE_TASKS };
1879
+ // src/provider-hf-transformers/HuggingFaceTransformersQueuedProvider.ts
1880
+ import {
1881
+ QueuedAiProvider
1882
+ } from "@workglow/ai";
1883
+ class HuggingFaceTransformersQueuedProvider extends QueuedAiProvider {
1884
+ name = HF_TRANSFORMERS_ONNX;
1885
+ isLocal = true;
1886
+ supportsBrowser = true;
1887
+ taskTypes = [
1888
+ "DownloadModelTask",
1889
+ "UnloadModelTask",
1890
+ "ModelInfoTask",
1891
+ "CountTokensTask",
1892
+ "TextEmbeddingTask",
1893
+ "TextGenerationTask",
1894
+ "TextQuestionAnswerTask",
1895
+ "TextLanguageDetectionTask",
1896
+ "TextClassificationTask",
1897
+ "TextFillMaskTask",
1898
+ "TextNamedEntityRecognitionTask",
1899
+ "TextRewriterTask",
1900
+ "TextSummaryTask",
1901
+ "TextTranslationTask",
1902
+ "ImageSegmentationTask",
1903
+ "ImageToTextTask",
1904
+ "BackgroundRemovalTask",
1905
+ "ImageEmbeddingTask",
1906
+ "ImageClassificationTask",
1907
+ "ObjectDetectionTask",
1908
+ "ToolCallingTask",
1909
+ "ModelSearchTask"
1910
+ ];
1911
+ constructor(tasks, streamTasks, reactiveTasks) {
1912
+ super(tasks, streamTasks, reactiveTasks);
1913
+ }
1914
+ }
1915
+
1916
+ // src/provider-hf-transformers/registerHuggingFaceTransformersInline.ts
1917
+ init_HFT_Pipeline();
1918
+ async function registerHuggingFaceTransformersInline(options) {
1919
+ const { env } = await loadTransformersSDK();
1920
+ env.backends.onnx.wasm.proxy = true;
1921
+ const provider = new HuggingFaceTransformersQueuedProvider(HFT_TASKS, HFT_STREAM_TASKS, HFT_REACTIVE_TASKS);
1922
+ const baseDispose = provider.dispose.bind(provider);
1923
+ provider.dispose = async () => {
1924
+ await clearHftInlinePipelineCache();
1925
+ await baseDispose();
1926
+ };
1927
+ await provider.register(options ?? {});
1928
+ }
1929
+ // src/provider-hf-transformers/registerHuggingFaceTransformersWorker.ts
1930
+ import { getLogger as getLogger7, globalServiceRegistry, WORKER_SERVER } from "@workglow/util/worker";
1931
+
1932
+ // src/provider-hf-transformers/HuggingFaceTransformersProvider.ts
1933
+ import {
1934
+ AiProvider
1935
+ } from "@workglow/ai/worker";
1936
+ class HuggingFaceTransformersProvider extends AiProvider {
1937
+ name = HF_TRANSFORMERS_ONNX;
1938
+ isLocal = true;
1939
+ supportsBrowser = true;
1940
+ taskTypes = [
1941
+ "DownloadModelTask",
1942
+ "UnloadModelTask",
1943
+ "ModelInfoTask",
1944
+ "CountTokensTask",
1945
+ "TextEmbeddingTask",
1946
+ "TextGenerationTask",
1947
+ "TextQuestionAnswerTask",
1948
+ "TextLanguageDetectionTask",
1949
+ "TextClassificationTask",
1950
+ "TextFillMaskTask",
1951
+ "TextNamedEntityRecognitionTask",
1952
+ "TextRewriterTask",
1953
+ "TextSummaryTask",
1954
+ "TextTranslationTask",
1955
+ "ImageSegmentationTask",
1956
+ "ImageToTextTask",
1957
+ "BackgroundRemovalTask",
1958
+ "ImageEmbeddingTask",
1959
+ "ImageClassificationTask",
1960
+ "ObjectDetectionTask",
1961
+ "ToolCallingTask",
1962
+ "ModelSearchTask"
1963
+ ];
1964
+ constructor(tasks, streamTasks, reactiveTasks) {
1965
+ super(tasks, streamTasks, reactiveTasks);
1966
+ }
1967
+ }
1968
+
1969
+ // src/provider-hf-transformers/registerHuggingFaceTransformersWorker.ts
1970
+ init_HFT_Pipeline();
1971
+ async function registerHuggingFaceTransformersWorker() {
1972
+ const { env } = await loadTransformersSDK();
1973
+ env.backends.onnx.wasm.proxy = true;
1974
+ const workerServer = globalServiceRegistry.get(WORKER_SERVER);
1975
+ new HuggingFaceTransformersProvider(HFT_TASKS, HFT_STREAM_TASKS, HFT_REACTIVE_TASKS).registerOnWorkerServer(workerServer);
1976
+ workerServer.sendReady();
1977
+ getLogger7().info("HuggingFaceTransformers worker job run functions registered");
1978
+ }
1979
+
1980
+ // src/provider-hf-transformers/runtime.ts
1981
+ init_HFT_Pipeline();
1982
+ export {
1983
+ setHftCacheDir,
1984
+ removeCachedPipeline,
1985
+ registerHuggingFaceTransformersWorker,
1986
+ registerHuggingFaceTransformersInline,
1987
+ parseToolCallsFromText,
1988
+ parseOnnxQuantizations,
1989
+ loadTransformersSDK,
1990
+ hasCachedPipeline,
1991
+ getPipelineCacheKey,
1992
+ getPipeline,
1993
+ createToolCallMarkupFilter,
1994
+ clearPipelineCache,
1995
+ QuantizationDataType,
1996
+ PipelineUseCase,
1997
+ ONNX_QUANTIZATION_SUFFIX_MAPPING,
1998
+ HfTransformersOnnxModelSchema,
1999
+ HfTransformersOnnxModelRecordSchema,
2000
+ HfTransformersOnnxModelConfigSchema,
2001
+ HTF_CACHE_NAME,
2002
+ HF_TRANSFORMERS_ONNX
2003
+ };
1370
2004
 
1371
- //# debugId=84170CBF73939EA164756E2164756E21
2005
+ //# debugId=1B109CAD7F3983A264756E2164756E21