@genai-fi/nanogpt 0.9.1 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (343)
  1. package/README.md +352 -14
  2. package/dist/Generator.js +69 -78
  3. package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
  4. package/dist/Reshape-CF6odzV4.js +16 -0
  5. package/dist/Reshape-_kILl6tK.js +81 -0
  6. package/dist/TeachableLLM.js +28 -22
  7. package/dist/Trainer.d.ts +2 -0
  8. package/dist/Trainer.js +3 -2
  9. package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
  10. package/dist/backend.d.ts +2 -1
  11. package/dist/backend.js +10 -4
  12. package/dist/backend_util-D-rUb2ty.js +474 -0
  13. package/dist/backend_webgpu-B0u2ndUn.js +547 -0
  14. package/dist/binary_op_util-pKXltfxI.js +192 -0
  15. package/dist/broadcast_to-CwF7XIeu.js +30 -0
  16. package/dist/checks/appendCache.js +2 -2
  17. package/dist/checks/attentionMask.js +3 -3
  18. package/dist/checks/check.d.ts +1 -1
  19. package/dist/checks/check.js +8 -8
  20. package/dist/checks/gelu.js +2 -2
  21. package/dist/checks/index.d.ts +2 -0
  22. package/dist/checks/index.js +7 -5
  23. package/dist/checks/matMulGelu.js +6 -6
  24. package/dist/checks/normRMS.js +7 -7
  25. package/dist/checks/normRMSGrad.js +3 -3
  26. package/dist/checks/packUnpack.d.ts +1 -0
  27. package/dist/checks/packUnpack.js +18 -0
  28. package/dist/checks/qkv.js +12 -27
  29. package/dist/checks/rope.js +2 -2
  30. package/dist/checks/weights.js +18 -16
  31. package/dist/complex-CSlYz-2T.js +13 -0
  32. package/dist/complex_util-Yc1A_gV1.js +55 -0
  33. package/dist/concat-BHlIJeyT.js +19 -0
  34. package/dist/concat_util-DcJk7YHS.js +22 -0
  35. package/dist/data/docx.js +1 -1
  36. package/dist/data/parquet.js +2 -2
  37. package/dist/data/pdf.js +1 -1
  38. package/dist/data/textLoader.js +1 -1
  39. package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
  40. package/dist/dropout-C1pM3f11.js +99 -0
  41. package/dist/expand_dims-BPG4fwBP.js +13 -0
  42. package/dist/exports_initializers-xuidcwI4.js +7 -0
  43. package/dist/gather-DykLGqmW.js +10 -0
  44. package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
  45. package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
  46. package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
  47. package/dist/index-CjOj7j-u.js +7308 -0
  48. package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
  49. package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
  50. package/dist/index-ZyQhjEPo.js +2157 -0
  51. package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
  52. package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
  53. package/dist/layers/BaseLayer.d.ts +1 -0
  54. package/dist/layers/BaseLayer.js +7 -6
  55. package/dist/layers/CausalSelfAttention.d.ts +0 -1
  56. package/dist/layers/CausalSelfAttention.js +56 -55
  57. package/dist/layers/MLP.js +15 -16
  58. package/dist/layers/PositionEmbedding.js +5 -14
  59. package/dist/layers/RMSNorm.js +3 -3
  60. package/dist/layers/RoPECache.d.ts +2 -0
  61. package/dist/layers/RoPECache.js +22 -17
  62. package/dist/layers/TiedEmbedding.js +22 -17
  63. package/dist/layers/TransformerBlock.js +21 -20
  64. package/dist/loader/load.js +1 -1
  65. package/dist/loader/loadTransformers.js +1 -1
  66. package/dist/loader/oldZipLoad.js +39 -33
  67. package/dist/loader/save.js +1 -1
  68. package/dist/log_sum_exp-DWI-76TI.js +41 -0
  69. package/dist/main.d.ts +8 -0
  70. package/dist/main.js +63 -52
  71. package/dist/matMul16--R5hOwDG.js +77 -0
  72. package/dist/mat_mul-DeAh4uTH.js +12 -0
  73. package/dist/mod-Gt1rMB4n.js +12 -0
  74. package/dist/models/NanoGPTV1.js +40 -31
  75. package/dist/models/model.d.ts +2 -0
  76. package/dist/models/model.js +37 -29
  77. package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
  78. package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
  79. package/dist/ones-CAMiP4I2.js +15 -0
  80. package/dist/ops/adamAdjust.js +1 -1
  81. package/dist/ops/adamMoments.d.ts +1 -1
  82. package/dist/ops/adamMoments.js +4 -4
  83. package/dist/ops/add16.d.ts +2 -0
  84. package/dist/ops/add16.js +9 -0
  85. package/dist/ops/appendCache.js +16 -9
  86. package/dist/ops/attentionMask.js +4 -4
  87. package/dist/ops/concat16.d.ts +2 -0
  88. package/dist/ops/concat16.js +9 -0
  89. package/dist/ops/cpu/adamAdjust.js +14 -13
  90. package/dist/ops/cpu/adamMoments.js +10 -9
  91. package/dist/ops/cpu/appendCache.js +9 -8
  92. package/dist/ops/cpu/attentionMask.js +15 -14
  93. package/dist/ops/cpu/fusedSoftmax.js +13 -12
  94. package/dist/ops/cpu/gatherSub.js +9 -24
  95. package/dist/ops/cpu/gelu.js +13 -12
  96. package/dist/ops/cpu/matMul16.d.ts +1 -0
  97. package/dist/ops/cpu/matMul16.js +16 -0
  98. package/dist/ops/cpu/matMulGelu.js +18 -16
  99. package/dist/ops/cpu/matMulMul.js +8 -7
  100. package/dist/ops/cpu/mulDropout.js +4 -3
  101. package/dist/ops/cpu/normRMS.js +11 -10
  102. package/dist/ops/cpu/qkv.js +17 -13
  103. package/dist/ops/cpu/rope.js +23 -22
  104. package/dist/ops/cpu/scatterSub.js +16 -30
  105. package/dist/ops/dot16.d.ts +2 -0
  106. package/dist/ops/dot16.js +42 -0
  107. package/dist/ops/gatherSub.js +1 -1
  108. package/dist/ops/gelu.js +2 -2
  109. package/dist/ops/grads/add16.d.ts +1 -0
  110. package/dist/ops/grads/add16.js +27 -0
  111. package/dist/ops/grads/attentionMask.js +12 -19
  112. package/dist/ops/grads/gelu.js +4 -3
  113. package/dist/ops/grads/matMul16.d.ts +2 -0
  114. package/dist/ops/grads/matMul16.js +9 -0
  115. package/dist/ops/grads/matMulGelu.js +8 -7
  116. package/dist/ops/grads/normRMS.js +8 -7
  117. package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
  118. package/dist/ops/grads/pack16.js +7 -0
  119. package/dist/ops/grads/qkv.d.ts +3 -1
  120. package/dist/ops/grads/qkv.js +28 -22
  121. package/dist/ops/grads/rope.d.ts +2 -1
  122. package/dist/ops/grads/rope.js +6 -13
  123. package/dist/ops/grads/softmax16.d.ts +2 -0
  124. package/dist/ops/grads/softmax16.js +26 -0
  125. package/dist/ops/grads/unpack16.d.ts +2 -0
  126. package/dist/ops/grads/unpack16.js +6 -0
  127. package/dist/ops/grads/utils.d.ts +3 -0
  128. package/dist/ops/grads/utils.js +10 -0
  129. package/dist/ops/matMul16.d.ts +15 -0
  130. package/dist/ops/matMul16.js +13 -0
  131. package/dist/ops/matMulGelu.js +1 -1
  132. package/dist/ops/matMulMul.js +1 -1
  133. package/dist/ops/mul16.d.ts +2 -0
  134. package/dist/ops/mul16.js +8 -0
  135. package/dist/ops/mulDrop.js +1 -1
  136. package/dist/ops/normRMS.js +1 -1
  137. package/dist/ops/pack16.d.ts +2 -0
  138. package/dist/ops/pack16.js +6 -0
  139. package/dist/ops/qkv.d.ts +1 -1
  140. package/dist/ops/qkv.js +8 -4
  141. package/dist/ops/reshape16.d.ts +2 -0
  142. package/dist/ops/reshape16.js +43 -0
  143. package/dist/ops/rope.d.ts +1 -1
  144. package/dist/ops/rope.js +8 -10
  145. package/dist/ops/scatterSub.js +1 -1
  146. package/dist/ops/slice16.d.ts +2 -0
  147. package/dist/ops/slice16.js +9 -0
  148. package/dist/ops/softmax16.d.ts +2 -0
  149. package/dist/ops/softmax16.js +12 -0
  150. package/dist/ops/sub16.d.ts +2 -0
  151. package/dist/ops/sub16.js +8 -0
  152. package/dist/ops/sum16.d.ts +2 -0
  153. package/dist/ops/sum16.js +13 -0
  154. package/dist/ops/transpose16.d.ts +3 -0
  155. package/dist/ops/transpose16.js +41 -0
  156. package/dist/ops/unpack16.d.ts +2 -0
  157. package/dist/ops/unpack16.js +6 -0
  158. package/dist/ops/webgl/adamAdjust.js +3 -2
  159. package/dist/ops/webgl/adamMoments.js +2 -1
  160. package/dist/ops/webgl/appendCache.js +2 -1
  161. package/dist/ops/webgl/attentionMask.js +5 -4
  162. package/dist/ops/webgl/fusedSoftmax.js +6 -4
  163. package/dist/ops/webgl/gatherSub.js +7 -6
  164. package/dist/ops/webgl/gelu.js +3 -2
  165. package/dist/ops/webgl/log.js +12 -27
  166. package/dist/ops/webgl/matMul16.d.ts +1 -0
  167. package/dist/ops/webgl/matMul16.js +37 -0
  168. package/dist/ops/webgl/matMulGelu.js +17 -15
  169. package/dist/ops/webgl/matMulMul.js +13 -12
  170. package/dist/ops/webgl/mulDropout.js +9 -8
  171. package/dist/ops/webgl/normRMS.js +8 -7
  172. package/dist/ops/webgl/qkv.js +6 -5
  173. package/dist/ops/webgl/rope.js +11 -10
  174. package/dist/ops/webgl/scatterSub.js +6 -5
  175. package/dist/ops/webgpu/adamAdjust.js +12 -10
  176. package/dist/ops/webgpu/adamMoments.js +27 -22
  177. package/dist/ops/webgpu/add16.d.ts +1 -0
  178. package/dist/ops/webgpu/add16.js +14 -0
  179. package/dist/ops/webgpu/appendCache.js +64 -17
  180. package/dist/ops/webgpu/attentionMask.js +19 -62
  181. package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
  182. package/dist/ops/webgpu/attentionMask32_program.js +54 -0
  183. package/dist/ops/webgpu/concat16.d.ts +19 -0
  184. package/dist/ops/webgpu/concat16.js +128 -0
  185. package/dist/ops/webgpu/gatherSub.js +9 -7
  186. package/dist/ops/webgpu/gelu.js +78 -31
  187. package/dist/ops/webgpu/index.js +12 -0
  188. package/dist/ops/webgpu/matMul16.d.ts +1 -0
  189. package/dist/ops/webgpu/matMul16.js +58 -0
  190. package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
  191. package/dist/ops/webgpu/matMul16_program.js +336 -0
  192. package/dist/ops/webgpu/mul16.d.ts +1 -0
  193. package/dist/ops/webgpu/mul16.js +14 -0
  194. package/dist/ops/webgpu/normRMS.js +21 -40
  195. package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
  196. package/dist/ops/webgpu/normRMS16_program.js +24 -0
  197. package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
  198. package/dist/ops/webgpu/normRMS32_program.js +24 -0
  199. package/dist/ops/webgpu/normRMSGrad.js +113 -64
  200. package/dist/ops/webgpu/pack16.d.ts +1 -0
  201. package/dist/ops/webgpu/pack16.js +19 -0
  202. package/dist/ops/webgpu/pack16_program.d.ts +19 -0
  203. package/dist/ops/webgpu/pack16_program.js +92 -0
  204. package/dist/ops/webgpu/qkv.js +20 -55
  205. package/dist/ops/webgpu/rope.js +77 -22
  206. package/dist/ops/webgpu/scatterSub.js +9 -7
  207. package/dist/ops/webgpu/slice16.d.ts +7 -0
  208. package/dist/ops/webgpu/slice16.js +71 -0
  209. package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
  210. package/dist/ops/webgpu/softmax16.js +23 -0
  211. package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
  212. package/dist/ops/webgpu/softmax16_program.js +73 -0
  213. package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
  214. package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
  215. package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
  216. package/dist/ops/webgpu/softmax16grad.js +38 -0
  217. package/dist/ops/webgpu/sub16.d.ts +1 -0
  218. package/dist/ops/webgpu/sub16.js +14 -0
  219. package/dist/ops/webgpu/sum16.d.ts +1 -0
  220. package/dist/ops/webgpu/sum16.js +40 -0
  221. package/dist/ops/webgpu/transpose16.d.ts +1 -0
  222. package/dist/ops/webgpu/transpose16.js +35 -0
  223. package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
  224. package/dist/ops/webgpu/transpose16_program.js +50 -0
  225. package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
  226. package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
  227. package/dist/ops/webgpu/unpack16.d.ts +1 -0
  228. package/dist/ops/webgpu/unpack16.js +49 -0
  229. package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
  230. package/dist/ops/webgpu/utils/binary_op.js +79 -0
  231. package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
  232. package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
  233. package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
  234. package/dist/ops/webgpu/utils/reductions.js +236 -45
  235. package/dist/ops-CNI3TwqM.js +645 -0
  236. package/dist/pack16-CFUqumar.js +41 -0
  237. package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
  238. package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
  239. package/dist/patches/PackedTensor.d.ts +12 -0
  240. package/dist/patches/PackedTensor.js +11 -0
  241. package/dist/patches/engine.d.ts +261 -0
  242. package/dist/patches/engine.js +10 -0
  243. package/dist/patches/tape.d.ts +12 -0
  244. package/dist/patches/tape.js +5 -0
  245. package/dist/patches/webgpu_backend.d.ts +18 -0
  246. package/dist/patches/webgpu_backend.js +57 -0
  247. package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
  248. package/dist/patches/webgpu_base.js +34 -0
  249. package/dist/patches/webgpu_program.d.ts +36 -0
  250. package/dist/patches/webgpu_program.js +401 -0
  251. package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
  252. package/dist/random_width-DY6Kk2Dl.js +10051 -0
  253. package/dist/range-BMS52eQi.js +11 -0
  254. package/dist/reciprocal-CTmshQ9J.js +10 -0
  255. package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
  256. package/dist/relu-yZ2-7WxU.js +10 -0
  257. package/dist/reshape-DevtBWtf.js +10 -0
  258. package/dist/rope-B5UUMsPi.js +32 -0
  259. package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
  260. package/dist/selu_util-D1w6yyTO.js +303 -0
  261. package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
  262. package/dist/shared-BuAXb4CI.js +2145 -0
  263. package/dist/sin-BGfy2HZo.js +16 -0
  264. package/dist/slice-D_gkkqZK.js +13 -0
  265. package/dist/slice_util-DtEldBfK.js +261 -0
  266. package/dist/softmax-ZHVebtR1.js +13 -0
  267. package/dist/split-DrfihRpZ.js +10 -0
  268. package/dist/squeeze-DZEpeblb.js +11 -0
  269. package/dist/stack-yOIAalTq.js +13 -0
  270. package/dist/sum-_fzj5ZTB.js +12 -0
  271. package/dist/tensor-DdQUJZlz.js +909 -0
  272. package/dist/tensor-f35l8Odg.js +8 -0
  273. package/dist/tensor1d-CeZuc-Rv.js +12 -0
  274. package/dist/tensor2d-G4Ys2GxX.js +15 -0
  275. package/dist/tensor4d-B8roDgtc.js +15 -0
  276. package/dist/tensor_util-DV-FP5Q3.js +523 -0
  277. package/dist/tfjs_backend-kNyO5L2d.js +653 -0
  278. package/dist/tile-BzyEiF-F.js +13 -0
  279. package/dist/tokeniser/CharTokeniser.js +1 -1
  280. package/dist/tokeniser/bpe.js +1 -1
  281. package/dist/training/Adam.d.ts +2 -1
  282. package/dist/training/Adam.js +12 -28
  283. package/dist/training/AdamExt.d.ts +1 -0
  284. package/dist/training/AdamExt.js +2 -2
  285. package/dist/training/DatasetBuilder.js +3 -20
  286. package/dist/training/FullTrainer.js +55 -48
  287. package/dist/training/Trainer.d.ts +11 -6
  288. package/dist/training/Trainer.js +51 -39
  289. package/dist/training/sparseCrossEntropy.js +3 -3
  290. package/dist/transpose-DKELTqhe.js +38 -0
  291. package/dist/utilities/arrayClose.js +7 -7
  292. package/dist/utilities/dummy.js +35 -27
  293. package/dist/utilities/multinomialCPU.js +2 -2
  294. package/dist/utilities/packed.d.ts +7 -0
  295. package/dist/utilities/packed.js +716 -0
  296. package/dist/utilities/performance.js +1 -1
  297. package/dist/utilities/profile.js +1 -1
  298. package/dist/utilities/safetensors.js +2 -2
  299. package/dist/utilities/sentences.d.ts +5 -0
  300. package/dist/utilities/sentences.js +41 -0
  301. package/dist/utilities/weights.js +2 -2
  302. package/dist/variable-Bhn5bHYv.js +7 -0
  303. package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
  304. package/dist/webgpu_util-BBCnKm2X.js +65 -0
  305. package/dist/zeros-2gldETuK.js +14 -0
  306. package/package.json +4 -3
  307. package/dist/Reshape-Bowtk9BP.js +0 -127
  308. package/dist/Reshape-DUqYftGC.js +0 -30
  309. package/dist/backend_util-CJIiDoV1.js +0 -749
  310. package/dist/broadcast_to-DzlNweb8.js +0 -44
  311. package/dist/concat-B912vBbo.js +0 -33
  312. package/dist/dropout-C-csYCLj.js +0 -193
  313. package/dist/exports_initializers-B8iZMgQ0.js +0 -16
  314. package/dist/gather-Dnpgw-YQ.js +0 -25
  315. package/dist/index-BzFyqcy-.js +0 -4457
  316. package/dist/index-C1rx_Ajs.js +0 -12076
  317. package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
  318. package/dist/log_sum_exp-DO6z8tSE.js +0 -103
  319. package/dist/mat_mul-DzjTFx-u.js +0 -27
  320. package/dist/mod-Dobti4j4.js +0 -27
  321. package/dist/ones-tIJeHlq-.js +0 -29
  322. package/dist/ops/fusedSoftmax.d.ts +0 -2
  323. package/dist/ops/fusedSoftmax.js +0 -10
  324. package/dist/ops/grads/fusedSoftmax.js +0 -22
  325. package/dist/ops-LuCMAnmM.js +0 -1525
  326. package/dist/random_width-CXVRloNK.js +0 -13670
  327. package/dist/range-CWcz7xFA.js +0 -26
  328. package/dist/reciprocal-C4rNcM-S.js +0 -25
  329. package/dist/relu-BjCh_SYb.js +0 -25
  330. package/dist/reshape-CnIwVG1c.js +0 -25
  331. package/dist/selu_util-OtRzVwW5.js +0 -719
  332. package/dist/shared-DmRsFyaJ.js +0 -3134
  333. package/dist/sin-gpDNRxE0.js +0 -47
  334. package/dist/slice-d0Vo9XTN.js +0 -28
  335. package/dist/softmax-D7Jj3p_P.js +0 -28
  336. package/dist/split-DK2k5eHf.js +0 -25
  337. package/dist/stack-DFatutCx.js +0 -27
  338. package/dist/sum-CJ0ULhmt.js +0 -27
  339. package/dist/tensor1d-vML0r3q6.js +0 -27
  340. package/dist/tensor2d-D76QGjF3.js +0 -30
  341. package/dist/tensor4d-Df1WlVDY.js +0 -30
  342. package/dist/webgpu_util-pLEV9tks.js +0 -80
  343. package/dist/zeros-Bj5rMYA7.js +0 -52
@@ -0,0 +1,7 @@
+ import "../../index-ZyQhjEPo.js";
+ import { b as t } from "../../pack16-CFUqumar.js";
+ import "../../slice-D_gkkqZK.js";
+ import "../../tensor_util-DV-FP5Q3.js";
+ export {
+   t as packGradConfig
+ };
@@ -1 +1,3 @@
- export {};
+ import { Tensor } from '@tensorflow/tfjs-core';
+ import { NamedGradientMap } from '@tensorflow/tfjs-core/dist/tape';
+ export declare function qkvGrad(dy: Tensor[], x: Tensor, kernel: Tensor): NamedGradientMap;
@@ -1,30 +1,36 @@
- import { i as Q } from "../../index-BzFyqcy-.js";
- const V = {
+ import "../../index-ZyQhjEPo.js";
+ import { a as u } from "../../matMul16--R5hOwDG.js";
+ import { concat16 as f } from "../concat16.js";
+ import { sum16 as g } from "../sum16.js";
+ import { packTensor as k, isPackedTensor as l } from "../../utilities/packed.js";
+ import { a as h } from "../../tensor_util-DV-FP5Q3.js";
+ import { s as G } from "../../squeeze-DZEpeblb.js";
+ const m = {
    kernelName: "QKV",
    inputsToSave: ["x", "kernel"],
    outputsToSave: [],
-   gradFunc: (x, K) => {
-     const [f, h, M] = x, [p, l] = K, [t, n, e] = p.shape, i = f.transpose([0, 2, 1, 3]).reshape([t * n, e]), u = h.transpose([0, 2, 1, 3]).reshape([t * n, e]), k = M.transpose([0, 2, 1, 3]).reshape([t * n, e]);
-     return {
-       x: () => {
-         const s = l.slice([0, 0], [e, e]), o = i.matMul(s, !1, !0);
-         s.dispose();
-         const d = l.slice([0, e], [e, e]), r = u.matMul(d, !1, !0);
-         d.dispose();
-         const a = o.add(r);
-         o.dispose(), r.dispose();
-         const c = l.slice([0, 2 * e], [e, e]), m = k.matMul(c, !1, !0);
-         c.dispose();
-         const v = a.add(m).reshape([t, n, e]);
-         return a.dispose(), m.dispose(), v;
-       },
+   gradFunc: (e, s) => {
+     const [o, n, t] = e, [a] = s, p = f([o, n, t], 1);
+     o.dispose(), n.dispose(), t.dispose();
+     const c = [a.shape[0], a.shape[1], 3 * a.shape[2]], i = u.gradFunc(p, s, {
+       transposeA: !1,
+       transposeB: !1,
+       originalShape: c,
+       perm: [0, 2, 1, 3]
+     });
+     return p.dispose(), {
+       x: () => i.A(),
        kernel: () => {
-         const s = p.reshape([t * n, e]), o = s.matMul(i, !0, !1), d = s.matMul(u, !0, !1), r = o.concat(d, 1);
-         o.dispose(), d.dispose();
-         const a = s.matMul(k, !0, !1), c = r.concat(a, 1);
-         return r.dispose(), a.dispose(), s.dispose(), c;
+         const r = i.B(), d = r.shape[0] === 1 ? G(r, [0]) : g(r, 0);
+         return r.dispose(), l(r) ? k(d) : d;
        }
      };
    }
  };
- Q(V);
+ function A(e, s, o) {
+   return m.gradFunc(e, [s, o], {});
+ }
+ h(m);
+ export {
+   A as qkvGrad
+ };
@@ -1 +1,2 @@
- export {};
+ import { GradConfig } from '@tensorflow/tfjs-core';
+ export declare const ropeGradConfig: GradConfig;
@@ -1,14 +1,7 @@
- import { i, e as a } from "../../index-BzFyqcy-.js";
- function p(n, e, s, o) {
-   return a().runKernel("Rope", { x: n, sin: e, cos: s }, { pastLen: o });
- }
- const c = {
-   kernelName: "Rope",
-   inputsToSave: ["sin", "cos"],
-   outputsToSave: [],
-   gradFunc: (n, e) => {
-     const [s, o] = e, t = s.neg(), r = p(n, t, o, 0);
-     return t.dispose(), { x: () => r };
-   }
+ import "../../utilities/packed.js";
+ import "../../index-ZyQhjEPo.js";
+ import { a as t } from "../../rope-B5UUMsPi.js";
+ import "../../tensor_util-DV-FP5Q3.js";
+ export {
+   t as ropeGradConfig
  };
- i(c);
@@ -0,0 +1,2 @@
+ import { GradConfig } from '@tensorflow/tfjs-core';
+ export declare const softmax16GradConfig: GradConfig;
@@ -0,0 +1,26 @@
+ import { e as n } from "../../index-ZyQhjEPo.js";
+ import { isPackedTensor as t } from "../../utilities/packed.js";
+ import { a } from "../../tensor_util-DV-FP5Q3.js";
+ function s(r, e) {
+   return n().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
+ }
+ const i = {
+   kernelName: "Softmax16",
+   outputsToSave: [!0],
+   gradFunc: (r, e) => {
+     const [o] = e;
+     if (Array.isArray(r))
+       throw new Error("Expected dy to be a single Tensor");
+     if (!t(o))
+       throw console.error(o), new Error("Softmax16 gradient requires packed y Tensor");
+     if (!t(r))
+       throw new Error("Softmax16 gradient requires packed dy Tensor");
+     return {
+       logits: () => s(r, o)
+     };
+   }
+ };
+ a(i);
+ export {
+   i as softmax16GradConfig
+ };
@@ -0,0 +1,2 @@
+ import { GradConfig } from '@tensorflow/tfjs-core';
+ export declare const unpackGradConfig: GradConfig;
@@ -0,0 +1,6 @@
+ import "../../index-ZyQhjEPo.js";
+ import { a as i } from "../../pack16-CFUqumar.js";
+ import "../../tensor_util-DV-FP5Q3.js";
+ export {
+   i as unpackGradConfig
+ };
@@ -0,0 +1,3 @@
+ import { TensorInfo } from '@tensorflow/tfjs-core';
+ export declare function forceFloat<T extends TensorInfo>(x: T): T;
+ export declare function forceInt<T extends TensorInfo>(x: T): T;
@@ -0,0 +1,10 @@
+ function n(t) {
+   return t.dtype = "float32", t;
+ }
+ function e(t) {
+   return t.dtype = "int32", t;
+ }
+ export {
+   n as forceFloat,
+   e as forceInt
+ };
@@ -0,0 +1,15 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function matMul16(A: Tensor, B: Tensor, transposeA?: boolean, transposeB?: boolean, attrs?: {
+   scale?: number;
+   scaleA?: number;
+   scaleB?: number;
+   activation?: 'gelu';
+   forceOutputShape?: number[];
+   perm?: number[];
+   causalMask?: boolean;
+   pastLen?: number;
+ }): Tensor;
+ export declare function matMul16Scaled(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+ export declare function matMul16ScaleA(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+ export declare function matMul16ScaleB(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+ export declare function matMul16Gelu(A: Tensor, B: Tensor, transposeA?: boolean, transposeB?: boolean): Tensor;
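The matMul16 family is the new packed-fp16 matmul surface (the fusedSoftmax entry points in the file list above were removed in its favour). A minimal usage sketch against these declarations; the deep import paths below are assumptions based on the dist layout, not a documented entry point:

```ts
import { tensor2d } from '@tensorflow/tfjs-core';
// Assumed deep-import path; the package may re-export these from its main entry.
import { matMul16, matMul16Scaled } from '@genai-fi/nanogpt/dist/ops/matMul16';

const A = tensor2d([[1, 2], [3, 4]]);
const B = tensor2d([[5, 6], [7, 8]]);

// Plain product: C = A · B.
const C = matMul16(A, B);

// Attention-style scores: (A · Bᵀ) scaled by 1/√d with a causal mask,
// using the optional attrs bag from the declaration above.
const scores = matMul16(A, B, false, true, {
  scale: 1 / Math.sqrt(2),
  causalMask: true,
  pastLen: 0,
});

// The same scaling via the convenience wrapper.
const scores2 = matMul16Scaled(A, B, 1 / Math.sqrt(2), false, true);
```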
@@ -0,0 +1,13 @@
+ import "../index-ZyQhjEPo.js";
+ import { b as p, c as u, d as i, e as s, m as M } from "../matMul16--R5hOwDG.js";
+ import "./webgl/matMul16.js";
+ import "./cpu/matMul16.js";
+ import "../utilities/packed.js";
+ import "../pack16-CFUqumar.js";
+ export {
+   p as matMul16,
+   u as matMul16Gelu,
+   i as matMul16ScaleA,
+   s as matMul16ScaleB,
+   M as matMul16Scaled
+ };
@@ -1,4 +1,4 @@
- import { e as u } from "../index-BzFyqcy-.js";
+ import { e as u } from "../index-ZyQhjEPo.js";
  import "./cpu/matMulGelu.js";
  import "./webgl/matMulGelu.js";
  import "./grads/matMulGelu.js";
@@ -1,4 +1,4 @@
- import { e as u } from "../index-BzFyqcy-.js";
+ import { e as u } from "../index-ZyQhjEPo.js";
  import "./cpu/matMulMul.js";
  import "./webgl/matMulMul.js";
  function m(e, r, t, l = !1, n = !1) {
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function mul16(a: Tensor, b: Tensor): Tensor;
@@ -0,0 +1,8 @@
+ import { m as t, e as u } from "../index-ZyQhjEPo.js";
+ import { isPackedTensor as n } from "../utilities/packed.js";
+ function i(r, e) {
+   return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
+ }
+ export {
+   i as mul16
+ };
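As the body above shows, mul16 only dispatches to the Mul16 kernel when at least one operand is packed; otherwise it is a plain multiply. sub16 (further down) follows the same pattern. A sketch from the caller's side, with assumed import paths:

```ts
import { tensor1d } from '@tensorflow/tfjs-core';
// Assumed deep-import paths.
import { mul16 } from '@genai-fi/nanogpt/dist/ops/mul16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const a = tensor1d([1, 2, 3, 4]);
const b = tensor1d([5, 6, 7, 8]);

// Neither operand packed: identical to a stock tf.mul.
const plain = mul16(a, b);

// Packed operands: routed through the Mul16 kernel instead.
const packed = mul16(pack16(a), pack16(b));
```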
@@ -1,4 +1,4 @@
- import { e as t } from "../index-BzFyqcy-.js";
+ import { e as t } from "../index-ZyQhjEPo.js";
  import "./cpu/mulDropout.js";
  import "./webgl/mulDropout.js";
  function m(r, o, e, n) {
@@ -1,4 +1,4 @@
- import { e as n } from "../index-BzFyqcy-.js";
+ import { e as n } from "../index-ZyQhjEPo.js";
  import "./cpu/normRMS.js";
  import "./webgl/normRMS.js";
  import "./grads/normRMS.js";
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function pack16(x: Tensor, scaling?: number, padding?: number): Tensor;
@@ -0,0 +1,6 @@
+ import "../utilities/packed.js";
+ import "../index-ZyQhjEPo.js";
+ import { p as t } from "../pack16-CFUqumar.js";
+ export {
+   t as pack16
+ };
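pack16 and unpack16 (declared further down) are the entry and exit points of the packed-fp16 representation that the other *16 ops detect via isPackedTensor. A hedged round-trip sketch with assumed import paths:

```ts
import { tensor2d } from '@tensorflow/tfjs-core';
// Assumed deep-import paths.
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';
import { unpack16 } from '@genai-fi/nanogpt/dist/ops/unpack16';

const x = tensor2d([[0.1, 0.2], [0.3, 0.4]]);

const xp = pack16(x);       // pack into the fp16 representation
const y = unpack16(xp);     // back to a regular float tensor
const y2 = unpack16(xp, 2); // optional scaling applied on the way out
```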
package/dist/ops/qkv.d.ts CHANGED
@@ -1,2 +1,2 @@
  import { Tensor } from '@tensorflow/tfjs-core';
- export declare function qkv(x: Tensor, kernel: Tensor, heads: number): Tensor[];
+ export declare function qkv(x: Tensor, kernel: Tensor, heads: number, packed?: boolean): Tensor[];
package/dist/ops/qkv.js CHANGED
@@ -1,10 +1,14 @@
- import { e as o } from "../index-BzFyqcy-.js";
+ import { e as m } from "../index-ZyQhjEPo.js";
  import "./cpu/qkv.js";
  import "./webgl/qkv.js";
  import "./grads/qkv.js";
- function u(r, e, n) {
-   return o().runKernel("QKV", { x: r, kernel: e }, { heads: n });
+ import { packTensor as f } from "../utilities/packed.js";
+ function l(n, t, e, r = !1) {
+   const o = m().runKernel("QKV", { x: n, kernel: t }, { heads: e, packed: r });
+   return r && o.forEach((i) => {
+     f(i);
+   }), o;
  }
  export {
-   u as qkv
+   l as qkv
  };
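qkv gains a packed flag in 0.10.x; when set, each returned Q/K/V tensor is tagged via packTensor so downstream *16 ops keep it in the packed representation. A usage sketch; the [batch, seq, embed] layout and the [embed, 3·embed] fused kernel shape are inferred from the gradient code above, and the import path is assumed:

```ts
import { randomNormal } from '@tensorflow/tfjs';
// Assumed deep-import path.
import { qkv } from '@genai-fi/nanogpt/dist/ops/qkv';

const x = randomNormal([1, 8, 64]);     // [batch, seq, embed]
const kernel = randomNormal([64, 192]); // fused Wq|Wk|Wv, 3 × embed wide
const heads = 4;

// Unpacked path, as in 0.9.x.
const [q, k, v] = qkv(x, kernel, heads);

// New: ask the kernel for packed fp16 outputs.
const [qp, kp, vp] = qkv(x, kernel, heads, true);
```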
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function reshape16(x: Tensor, shape: number[]): Tensor;
@@ -0,0 +1,43 @@
+ import { e as c } from "../index-ZyQhjEPo.js";
+ import { isPackedTensor as u, packTensor as i } from "../utilities/packed.js";
+ import { r as p } from "../reshape-DevtBWtf.js";
+ import { a as l, r as t } from "../tensor_util-DV-FP5Q3.js";
+ const m = {
+   kernelName: "Reshape16",
+   inputsToSave: ["x"],
+   gradFunc: (e, r) => {
+     const [n] = r;
+     if (Array.isArray(e))
+       throw new Error("Reshape16 gradient does not support multiple outputs.");
+     return { x: () => f(e, n.shape) };
+   }
+ };
+ l(m);
+ function a(e) {
+   const { inputs: r, attrs: n } = e, { x: s } = r, { shape: o } = n;
+   return u(s) ? i(p(s, o)) : p(s, o);
+ }
+ const k = {
+   kernelName: "Reshape16",
+   backendName: "webgpu",
+   kernelFunc: a
+ };
+ t(k);
+ const g = {
+   kernelName: "Reshape16",
+   backendName: "webgl",
+   kernelFunc: a
+ };
+ t(g);
+ const h = {
+   kernelName: "Reshape16",
+   backendName: "cpu",
+   kernelFunc: a
+ };
+ t(h);
+ function f(e, r) {
+   return c().runKernel("Reshape16", { x: e }, { shape: r });
+ }
+ export {
+   f as reshape16
+ };
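reshape16 registers one kernelFunc under webgpu, webgl, and cpu, re-tags packed inputs after the underlying reshape, and wires a gradient that reshapes dy back to the saved input shape. A sketch, with assumed import paths:

```ts
import { randomNormal } from '@tensorflow/tfjs';
// Assumed deep-import paths.
import { reshape16 } from '@genai-fi/nanogpt/dist/ops/reshape16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const x = pack16(randomNormal([2, 8, 64]));

// The result stays packed; under autodiff, dy is reshaped back to [2, 8, 64].
const y = reshape16(x, [2, 512]);
```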
@@ -1,3 +1,3 @@
  import { default as RoPECache } from '../layers/RoPECache';
  import { Tensor } from '@tensorflow/tfjs';
- export declare function rope(x: Tensor, cache: RoPECache, pastLength: number): Tensor;
+ export declare function rope(x: Tensor, cache: RoPECache, pastLength: number, negSin?: boolean): Tensor;
package/dist/ops/rope.js CHANGED
@@ -1,14 +1,12 @@
- import { e as p } from "../index-BzFyqcy-.js";
- import "../random_width-CXVRloNK.js";
- import "../register_all_kernels-DIGpEwcf.js";
- import "../index-Tf7vU29b.js";
- import "../dataset-DlZtKmBq.js";
+ import "../index-ZyQhjEPo.js";
+ import "../random_width-DY6Kk2Dl.js";
+ import "../register_all_kernels-Bwu1PTuU.js";
+ import "../index-Cp39cXWe.js";
+ import "../dataset-0xP8GjwI.js";
  import "./cpu/rope.js";
  import "./webgl/rope.js";
- import "./grads/rope.js";
- function C(r, o, e) {
-   return o.ensureRopeCache(r.shape[1] + e), p().runKernel("Rope", { x: r, sin: o.getSin(), cos: o.getCos() }, { pastLen: e });
- }
+ import { r as x } from "../rope-B5UUMsPi.js";
+ import "../utilities/packed.js";
  export {
-   C as rope
+   x as rope
  };
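The public rope wrapper now re-exports a shared implementation from rope-B5UUMsPi.js and gains a negSin flag, which applies the inverse rotation (the Rope gradient negates sin the same way). A sketch; the import paths are assumptions, and since the RoPECache constructor is not shown in this diff the cache is declared rather than built:

```ts
import { randomNormal } from '@tensorflow/tfjs';
// Assumed deep-import paths.
import RoPECache from '@genai-fi/nanogpt/dist/layers/RoPECache';
import { rope } from '@genai-fi/nanogpt/dist/ops/rope';

declare const cache: RoPECache; // constructed per the package's layer API

const q = randomNormal([1, 4, 8, 16]); // e.g. [batch, heads, seq, headDim]

// Rotate positions, offset by the number of tokens already in the KV cache.
const qRot = rope(q, cache, 0);

// negSin=true applies the inverse rotation.
const qBack = rope(qRot, cache, 0, true);
```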
@@ -1,4 +1,4 @@
- import { e as i } from "../index-BzFyqcy-.js";
+ import { e as i } from "../index-ZyQhjEPo.js";
  import "./cpu/scatterSub.js";
  import "./webgl/scatterSub.js";
  function c(t, r, e) {
@@ -0,0 +1,2 @@
+ import { Rank, Tensor } from '@tensorflow/tfjs-core';
+ export declare function slice16<R extends Rank = Rank>(x: Tensor<R>, begin: number | number[], size: number | number[]): Tensor<R>;
@@ -0,0 +1,9 @@
+ import { isPackedTensor as n } from "../utilities/packed.js";
+ import { e as c } from "../index-ZyQhjEPo.js";
+ import { s as i } from "../slice-D_gkkqZK.js";
+ function a(r, e, o) {
+   return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
+ }
+ export {
+   a as slice16
+ };
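slice16 only invokes the Slice16 kernel for packed inputs and otherwise defers to the standard slice, so callers can use it unconditionally. A sketch with assumed import paths:

```ts
import { tensor2d } from '@tensorflow/tfjs-core';
// Assumed deep-import paths.
import { slice16 } from '@genai-fi/nanogpt/dist/ops/slice16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const x = tensor2d([[1, 2, 3, 4], [5, 6, 7, 8]]);

// Unpacked input: defers to the stock slice.
const a = slice16(x, [0, 1], [2, 2]);

// Packed input: routed through the Slice16 kernel; here only the first
// axis is cut, leaving the packed last axis intact.
const b = slice16(pack16(x), [0, 0], [1, 4]);
```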
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function softmax16(logits: Tensor): Tensor;
@@ -0,0 +1,12 @@
+ import { e } from "../index-ZyQhjEPo.js";
+ import "./grads/softmax16.js";
+ import { isPackedTensor as m, packTensor as a } from "../utilities/packed.js";
+ function p(r) {
+   if (!m(r))
+     return e().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
+   const n = e().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 });
+   return m(r) ? a(n) : n;
+ }
+ export {
+   p as softmax16
+ };
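softmax16 falls back to the stock Softmax kernel for unpacked logits and only runs the packed Softmax16 kernel (whose gradient, registered above, requires packed dy and y) when the input is packed:

```ts
import { tensor2d } from '@tensorflow/tfjs-core';
// Assumed deep-import paths.
import { softmax16 } from '@genai-fi/nanogpt/dist/ops/softmax16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const logits = tensor2d([[1, 2, 3, 4], [4, 3, 2, 1]]);

const p1 = softmax16(logits);          // stock Softmax over the last axis
const p2 = softmax16(pack16(logits));  // packed Softmax16, result stays packed
```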
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function sub16(a: Tensor, b: Tensor): Tensor;
@@ -0,0 +1,8 @@
+ import { c as s, e as t } from "../index-ZyQhjEPo.js";
+ import { isPackedTensor as n } from "../utilities/packed.js";
+ function c(r, e) {
+   return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
+ }
+ export {
+   c as sub16
+ };
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function sum16(x: Tensor, axis?: number | number[], keepDims?: boolean): Tensor;
@@ -0,0 +1,13 @@
+ import { e as t } from "../index-ZyQhjEPo.js";
+ import { isPackedTensor as s } from "../utilities/packed.js";
+ import { s as n } from "../sum-_fzj5ZTB.js";
+ function p(r, o, e = !1) {
+   if (!s(r))
+     return n(r, o, e);
+   if (e)
+     throw new Error("sum16 with keepDims=true not supported for packed tensors");
+   return t().runKernel("Sum16", { x: r }, { axis: o ?? -1, keepDims: e });
+ }
+ export {
+   p as sum16
+ };
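sum16 defers to the regular sum for unpacked tensors; for packed tensors it runs the Sum16 kernel, defaulting to the last axis and rejecting keepDims, as the guard above shows:

```ts
import { tensor2d } from '@tensorflow/tfjs-core';
// Assumed deep-import paths.
import { sum16 } from '@genai-fi/nanogpt/dist/ops/sum16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const x = tensor2d([[1, 2, 3, 4], [5, 6, 7, 8]]);

const colSums = sum16(x, 0, true); // unpacked: plain sum, keepDims allowed
const rowSums = sum16(pack16(x));  // packed: Sum16 over the last axis
// sum16(pack16(x), -1, true) would throw: keepDims is unsupported when packed.
```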
@@ -0,0 +1,3 @@
+ import { GradConfig, Tensor } from '@tensorflow/tfjs-core';
+ export declare const transpose16GradConfig: GradConfig;
+ export declare function transpose16(x: Tensor, perm?: number[]): Tensor;
@@ -0,0 +1,41 @@
+ import { e as i } from "../index-ZyQhjEPo.js";
+ import { forceInt as u, forceFloat as l } from "./grads/utils.js";
+ import { g as m } from "../axis_util-BvHEw88j.js";
+ import { isPackedTensor as f, packTensor as g } from "../utilities/packed.js";
+ import { t as a } from "../transpose-DKELTqhe.js";
+ import { a as d, r as p } from "../tensor_util-DV-FP5Q3.js";
+ const k = {
+   kernelName: "Transpose16",
+   gradFunc: (e, s, o) => {
+     if (Array.isArray(e))
+       throw new Error("Transpose16 gradient does not support multiple outputs.");
+     const n = o, { perm: r } = n, t = m(r);
+     return { x: () => w(e, t) };
+   }
+ };
+ d(k);
+ function c(e) {
+   const { inputs: s, attrs: o } = e, { x: n } = s, { perm: r } = o, t = f(n);
+   if (t && r[r.length - 1] !== n.shape.length - 1)
+     throw new Error("Transpose16 currently only supports the last axis being unchanged.");
+   return t ? g(u(a(l(n), r))) : a(n, r);
+ }
+ const h = {
+   kernelName: "Transpose16",
+   backendName: "webgl",
+   kernelFunc: c
+ };
+ p(h);
+ const T = {
+   kernelName: "Transpose16",
+   backendName: "cpu",
+   kernelFunc: c
+ };
+ p(T);
+ function w(e, s) {
+   return s == null && (s = e.shape.map((n, r) => r).reverse()), i().runKernel("Transpose16", { x: e }, { perm: s });
+ }
+ export {
+   w as transpose16,
+   k as transpose16GradConfig
+ };
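transpose16 keeps packed tensors packed by forcing a float dtype around the stock transpose (the forceFloat/forceInt helpers from grads/utils.js above), but only permutations that leave the last, packed axis in place are accepted:

```ts
import { randomNormal } from '@tensorflow/tfjs';
// Assumed deep-import paths.
import { transpose16 } from '@genai-fi/nanogpt/dist/ops/transpose16';
import { pack16 } from '@genai-fi/nanogpt/dist/ops/pack16';

const x = pack16(randomNormal([2, 4, 8, 16]));

// OK: axis 3 stays last, e.g. a [batch, seq, heads, dim] shuffle.
const y = transpose16(x, [0, 2, 1, 3]);

// Throws "Transpose16 currently only supports the last axis being unchanged."
// const bad = transpose16(x, [3, 1, 2, 0]);
```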
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function unpack16(x: Tensor, scaling?: number, disposeArg?: boolean): Tensor;
@@ -0,0 +1,6 @@
+ import "../index-ZyQhjEPo.js";
+ import { u as t } from "../pack16-CFUqumar.js";
+ import "../utilities/packed.js";
+ export {
+   t as unpack16
+ };
@@ -1,5 +1,6 @@
- import { r as n } from "../../Reshape-Bowtk9BP.js";
- import { f } from "../../index-BzFyqcy-.js";
+ import { r as n } from "../../Reshape-_kILl6tK.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as f } from "../../tensor_util-DV-FP5Q3.js";
  class v {
    variableNames = ["moments", "value"];
    outputShape;
@@ -1,4 +1,5 @@
- import { f as m } from "../../index-BzFyqcy-.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as m } from "../../tensor_util-DV-FP5Q3.js";
  class i {
    variableNames = ["moments", "gradient"];
    outputShape;
@@ -1,4 +1,5 @@
- import { f as p } from "../../index-BzFyqcy-.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as p } from "../../tensor_util-DV-FP5Q3.js";
  class m {
    variableNames = ["cache", "item"];
    outputShape;
@@ -1,4 +1,5 @@
- import { f as m } from "../../index-BzFyqcy-.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as d } from "../../tensor_util-DV-FP5Q3.js";
  class h {
    variableNames = ["q", "k"];
    outputShape;
@@ -34,12 +35,12 @@ class h {
    }
  }
  function l(o) {
-   const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new h(i, u, r, c, p);
-   return a.runWebGLProgram(d, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
+   const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], m = new h(i, u, r, c, p);
+   return a.runWebGLProgram(m, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
  }
  const f = {
    kernelName: "AttentionMask",
    backendName: "webgl",
    kernelFunc: l
  };
- m(f);
+ d(f);
@@ -1,7 +1,9 @@
- import { m as b, s as I, r as k } from "../../RealDiv-D4EzDsC0.js";
- import { r as v } from "../../Reshape-Bowtk9BP.js";
- import { f as w, p as P } from "../../index-BzFyqcy-.js";
- import { e as S } from "../../axis_util-TbGYJ208.js";
+ import { m as b, s as I, r as k } from "../../RealDiv-DgA3z9oO.js";
+ import { r as v } from "../../Reshape-_kILl6tK.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as w } from "../../tensor_util-DV-FP5Q3.js";
+ import { p as P } from "../../tensor-DdQUJZlz.js";
+ import { e as S } from "../../axis_util-BvHEw88j.js";
  class T {
    variableNames = ["logits", "maxLogits"];
    outputShape;
@@ -1,5 +1,6 @@
- import { f as l } from "../../index-BzFyqcy-.js";
- class u {
+ import "../../index-ZyQhjEPo.js";
+ import { r as i } from "../../tensor_util-DV-FP5Q3.js";
+ class l {
    variableNames = ["labels", "logits", "values"];
    outputShape;
    userCode;
@@ -15,13 +16,13 @@ class u {
    `;
    }
  }
- function i(t) {
-   const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new u(r);
+ function u(t) {
+   const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new l(r);
    return a.runWebGLProgram(n, [o, e, s], "float32");
  }
  const c = {
    kernelName: "EfficientGatherSub",
    backendName: "webgl",
-   kernelFunc: i
+   kernelFunc: u
  };
- l(c);
+ i(c);
@@ -1,5 +1,6 @@
- import { f as a } from "../../index-BzFyqcy-.js";
- import { u as s, C as i } from "../../kernel_funcs_utils-DKLK0Mg3.js";
+ import "../../index-ZyQhjEPo.js";
+ import { u as s, C as i } from "../../kernel_funcs_utils-Dg_-E44D.js";
+ import { r as a } from "../../tensor_util-DV-FP5Q3.js";
  const t = 0.7978845608028654, r = 0.044715, c = i + `
    float x3 = x * x * x;
    float inner = x + ${r} * x3;