@genai-fi/nanogpt 0.9.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (343)
  1. package/README.md +352 -14
  2. package/dist/Generator.js +69 -78
  3. package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
  4. package/dist/Reshape-CF6odzV4.js +16 -0
  5. package/dist/Reshape-_kILl6tK.js +81 -0
  6. package/dist/TeachableLLM.js +28 -22
  7. package/dist/Trainer.d.ts +2 -0
  8. package/dist/Trainer.js +3 -2
  9. package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
  10. package/dist/backend.d.ts +2 -1
  11. package/dist/backend.js +10 -4
  12. package/dist/backend_util-D-rUb2ty.js +474 -0
  13. package/dist/backend_webgpu-B0u2ndUn.js +547 -0
  14. package/dist/binary_op_util-pKXltfxI.js +192 -0
  15. package/dist/broadcast_to-CwF7XIeu.js +30 -0
  16. package/dist/checks/appendCache.js +2 -2
  17. package/dist/checks/attentionMask.js +3 -3
  18. package/dist/checks/check.d.ts +1 -1
  19. package/dist/checks/check.js +8 -8
  20. package/dist/checks/gelu.js +2 -2
  21. package/dist/checks/index.d.ts +2 -0
  22. package/dist/checks/index.js +7 -5
  23. package/dist/checks/matMulGelu.js +6 -6
  24. package/dist/checks/normRMS.js +7 -7
  25. package/dist/checks/normRMSGrad.js +3 -3
  26. package/dist/checks/packUnpack.d.ts +1 -0
  27. package/dist/checks/packUnpack.js +18 -0
  28. package/dist/checks/qkv.js +12 -27
  29. package/dist/checks/rope.js +2 -2
  30. package/dist/checks/weights.js +18 -16
  31. package/dist/complex-CSlYz-2T.js +13 -0
  32. package/dist/complex_util-Yc1A_gV1.js +55 -0
  33. package/dist/concat-BHlIJeyT.js +19 -0
  34. package/dist/concat_util-DcJk7YHS.js +22 -0
  35. package/dist/data/docx.js +1 -1
  36. package/dist/data/parquet.js +2 -2
  37. package/dist/data/pdf.js +1 -1
  38. package/dist/data/textLoader.js +1 -1
  39. package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
  40. package/dist/dropout-C1pM3f11.js +99 -0
  41. package/dist/expand_dims-BPG4fwBP.js +13 -0
  42. package/dist/exports_initializers-xuidcwI4.js +7 -0
  43. package/dist/gather-DykLGqmW.js +10 -0
  44. package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
  45. package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
  46. package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
  47. package/dist/index-CjOj7j-u.js +7308 -0
  48. package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
  49. package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
  50. package/dist/index-ZyQhjEPo.js +2157 -0
  51. package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
  52. package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
  53. package/dist/layers/BaseLayer.d.ts +1 -0
  54. package/dist/layers/BaseLayer.js +7 -6
  55. package/dist/layers/CausalSelfAttention.d.ts +0 -1
  56. package/dist/layers/CausalSelfAttention.js +56 -55
  57. package/dist/layers/MLP.js +15 -16
  58. package/dist/layers/PositionEmbedding.js +5 -14
  59. package/dist/layers/RMSNorm.js +3 -3
  60. package/dist/layers/RoPECache.d.ts +2 -0
  61. package/dist/layers/RoPECache.js +22 -17
  62. package/dist/layers/TiedEmbedding.js +22 -17
  63. package/dist/layers/TransformerBlock.js +21 -20
  64. package/dist/loader/load.js +1 -1
  65. package/dist/loader/loadTransformers.js +1 -1
  66. package/dist/loader/oldZipLoad.js +39 -33
  67. package/dist/loader/save.js +1 -1
  68. package/dist/log_sum_exp-DWI-76TI.js +41 -0
  69. package/dist/main.d.ts +8 -0
  70. package/dist/main.js +63 -52
  71. package/dist/matMul16--R5hOwDG.js +77 -0
  72. package/dist/mat_mul-DeAh4uTH.js +12 -0
  73. package/dist/mod-Gt1rMB4n.js +12 -0
  74. package/dist/models/NanoGPTV1.js +40 -31
  75. package/dist/models/model.d.ts +2 -0
  76. package/dist/models/model.js +37 -29
  77. package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
  78. package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
  79. package/dist/ones-CAMiP4I2.js +15 -0
  80. package/dist/ops/adamAdjust.js +1 -1
  81. package/dist/ops/adamMoments.d.ts +1 -1
  82. package/dist/ops/adamMoments.js +4 -4
  83. package/dist/ops/add16.d.ts +2 -0
  84. package/dist/ops/add16.js +9 -0
  85. package/dist/ops/appendCache.js +16 -9
  86. package/dist/ops/attentionMask.js +4 -4
  87. package/dist/ops/concat16.d.ts +2 -0
  88. package/dist/ops/concat16.js +9 -0
  89. package/dist/ops/cpu/adamAdjust.js +14 -13
  90. package/dist/ops/cpu/adamMoments.js +10 -9
  91. package/dist/ops/cpu/appendCache.js +9 -8
  92. package/dist/ops/cpu/attentionMask.js +15 -14
  93. package/dist/ops/cpu/fusedSoftmax.js +13 -12
  94. package/dist/ops/cpu/gatherSub.js +9 -24
  95. package/dist/ops/cpu/gelu.js +13 -12
  96. package/dist/ops/cpu/matMul16.d.ts +1 -0
  97. package/dist/ops/cpu/matMul16.js +16 -0
  98. package/dist/ops/cpu/matMulGelu.js +18 -16
  99. package/dist/ops/cpu/matMulMul.js +8 -7
  100. package/dist/ops/cpu/mulDropout.js +4 -3
  101. package/dist/ops/cpu/normRMS.js +11 -10
  102. package/dist/ops/cpu/qkv.js +17 -13
  103. package/dist/ops/cpu/rope.js +23 -22
  104. package/dist/ops/cpu/scatterSub.js +16 -30
  105. package/dist/ops/dot16.d.ts +2 -0
  106. package/dist/ops/dot16.js +42 -0
  107. package/dist/ops/gatherSub.js +1 -1
  108. package/dist/ops/gelu.js +2 -2
  109. package/dist/ops/grads/add16.d.ts +1 -0
  110. package/dist/ops/grads/add16.js +27 -0
  111. package/dist/ops/grads/attentionMask.js +12 -19
  112. package/dist/ops/grads/gelu.js +4 -3
  113. package/dist/ops/grads/matMul16.d.ts +2 -0
  114. package/dist/ops/grads/matMul16.js +9 -0
  115. package/dist/ops/grads/matMulGelu.js +8 -7
  116. package/dist/ops/grads/normRMS.js +8 -7
  117. package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
  118. package/dist/ops/grads/pack16.js +7 -0
  119. package/dist/ops/grads/qkv.d.ts +3 -1
  120. package/dist/ops/grads/qkv.js +28 -22
  121. package/dist/ops/grads/rope.d.ts +2 -1
  122. package/dist/ops/grads/rope.js +6 -13
  123. package/dist/ops/grads/softmax16.d.ts +2 -0
  124. package/dist/ops/grads/softmax16.js +26 -0
  125. package/dist/ops/grads/unpack16.d.ts +2 -0
  126. package/dist/ops/grads/unpack16.js +6 -0
  127. package/dist/ops/grads/utils.d.ts +3 -0
  128. package/dist/ops/grads/utils.js +10 -0
  129. package/dist/ops/matMul16.d.ts +15 -0
  130. package/dist/ops/matMul16.js +13 -0
  131. package/dist/ops/matMulGelu.js +1 -1
  132. package/dist/ops/matMulMul.js +1 -1
  133. package/dist/ops/mul16.d.ts +2 -0
  134. package/dist/ops/mul16.js +8 -0
  135. package/dist/ops/mulDrop.js +1 -1
  136. package/dist/ops/normRMS.js +1 -1
  137. package/dist/ops/pack16.d.ts +2 -0
  138. package/dist/ops/pack16.js +6 -0
  139. package/dist/ops/qkv.d.ts +1 -1
  140. package/dist/ops/qkv.js +8 -4
  141. package/dist/ops/reshape16.d.ts +2 -0
  142. package/dist/ops/reshape16.js +43 -0
  143. package/dist/ops/rope.d.ts +1 -1
  144. package/dist/ops/rope.js +8 -10
  145. package/dist/ops/scatterSub.js +1 -1
  146. package/dist/ops/slice16.d.ts +2 -0
  147. package/dist/ops/slice16.js +9 -0
  148. package/dist/ops/softmax16.d.ts +2 -0
  149. package/dist/ops/softmax16.js +12 -0
  150. package/dist/ops/sub16.d.ts +2 -0
  151. package/dist/ops/sub16.js +8 -0
  152. package/dist/ops/sum16.d.ts +2 -0
  153. package/dist/ops/sum16.js +13 -0
  154. package/dist/ops/transpose16.d.ts +3 -0
  155. package/dist/ops/transpose16.js +41 -0
  156. package/dist/ops/unpack16.d.ts +2 -0
  157. package/dist/ops/unpack16.js +6 -0
  158. package/dist/ops/webgl/adamAdjust.js +3 -2
  159. package/dist/ops/webgl/adamMoments.js +2 -1
  160. package/dist/ops/webgl/appendCache.js +2 -1
  161. package/dist/ops/webgl/attentionMask.js +5 -4
  162. package/dist/ops/webgl/fusedSoftmax.js +6 -4
  163. package/dist/ops/webgl/gatherSub.js +7 -6
  164. package/dist/ops/webgl/gelu.js +3 -2
  165. package/dist/ops/webgl/log.js +12 -27
  166. package/dist/ops/webgl/matMul16.d.ts +1 -0
  167. package/dist/ops/webgl/matMul16.js +37 -0
  168. package/dist/ops/webgl/matMulGelu.js +17 -15
  169. package/dist/ops/webgl/matMulMul.js +13 -12
  170. package/dist/ops/webgl/mulDropout.js +9 -8
  171. package/dist/ops/webgl/normRMS.js +8 -7
  172. package/dist/ops/webgl/qkv.js +6 -5
  173. package/dist/ops/webgl/rope.js +11 -10
  174. package/dist/ops/webgl/scatterSub.js +6 -5
  175. package/dist/ops/webgpu/adamAdjust.js +12 -10
  176. package/dist/ops/webgpu/adamMoments.js +27 -22
  177. package/dist/ops/webgpu/add16.d.ts +1 -0
  178. package/dist/ops/webgpu/add16.js +14 -0
  179. package/dist/ops/webgpu/appendCache.js +64 -17
  180. package/dist/ops/webgpu/attentionMask.js +19 -62
  181. package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
  182. package/dist/ops/webgpu/attentionMask32_program.js +54 -0
  183. package/dist/ops/webgpu/concat16.d.ts +19 -0
  184. package/dist/ops/webgpu/concat16.js +128 -0
  185. package/dist/ops/webgpu/gatherSub.js +9 -7
  186. package/dist/ops/webgpu/gelu.js +78 -31
  187. package/dist/ops/webgpu/index.js +12 -0
  188. package/dist/ops/webgpu/matMul16.d.ts +1 -0
  189. package/dist/ops/webgpu/matMul16.js +58 -0
  190. package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
  191. package/dist/ops/webgpu/matMul16_program.js +336 -0
  192. package/dist/ops/webgpu/mul16.d.ts +1 -0
  193. package/dist/ops/webgpu/mul16.js +14 -0
  194. package/dist/ops/webgpu/normRMS.js +21 -40
  195. package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
  196. package/dist/ops/webgpu/normRMS16_program.js +24 -0
  197. package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
  198. package/dist/ops/webgpu/normRMS32_program.js +24 -0
  199. package/dist/ops/webgpu/normRMSGrad.js +113 -64
  200. package/dist/ops/webgpu/pack16.d.ts +1 -0
  201. package/dist/ops/webgpu/pack16.js +19 -0
  202. package/dist/ops/webgpu/pack16_program.d.ts +19 -0
  203. package/dist/ops/webgpu/pack16_program.js +92 -0
  204. package/dist/ops/webgpu/qkv.js +20 -55
  205. package/dist/ops/webgpu/rope.js +77 -22
  206. package/dist/ops/webgpu/scatterSub.js +9 -7
  207. package/dist/ops/webgpu/slice16.d.ts +7 -0
  208. package/dist/ops/webgpu/slice16.js +71 -0
  209. package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
  210. package/dist/ops/webgpu/softmax16.js +23 -0
  211. package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
  212. package/dist/ops/webgpu/softmax16_program.js +73 -0
  213. package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
  214. package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
  215. package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
  216. package/dist/ops/webgpu/softmax16grad.js +38 -0
  217. package/dist/ops/webgpu/sub16.d.ts +1 -0
  218. package/dist/ops/webgpu/sub16.js +14 -0
  219. package/dist/ops/webgpu/sum16.d.ts +1 -0
  220. package/dist/ops/webgpu/sum16.js +40 -0
  221. package/dist/ops/webgpu/transpose16.d.ts +1 -0
  222. package/dist/ops/webgpu/transpose16.js +35 -0
  223. package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
  224. package/dist/ops/webgpu/transpose16_program.js +50 -0
  225. package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
  226. package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
  227. package/dist/ops/webgpu/unpack16.d.ts +1 -0
  228. package/dist/ops/webgpu/unpack16.js +49 -0
  229. package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
  230. package/dist/ops/webgpu/utils/binary_op.js +79 -0
  231. package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
  232. package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
  233. package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
  234. package/dist/ops/webgpu/utils/reductions.js +236 -45
  235. package/dist/ops-CNI3TwqM.js +645 -0
  236. package/dist/pack16-CFUqumar.js +41 -0
  237. package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
  238. package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
  239. package/dist/patches/PackedTensor.d.ts +12 -0
  240. package/dist/patches/PackedTensor.js +11 -0
  241. package/dist/patches/engine.d.ts +261 -0
  242. package/dist/patches/engine.js +10 -0
  243. package/dist/patches/tape.d.ts +12 -0
  244. package/dist/patches/tape.js +5 -0
  245. package/dist/patches/webgpu_backend.d.ts +18 -0
  246. package/dist/patches/webgpu_backend.js +57 -0
  247. package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
  248. package/dist/patches/webgpu_base.js +34 -0
  249. package/dist/patches/webgpu_program.d.ts +36 -0
  250. package/dist/patches/webgpu_program.js +401 -0
  251. package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
  252. package/dist/random_width-DY6Kk2Dl.js +10051 -0
  253. package/dist/range-BMS52eQi.js +11 -0
  254. package/dist/reciprocal-CTmshQ9J.js +10 -0
  255. package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
  256. package/dist/relu-yZ2-7WxU.js +10 -0
  257. package/dist/reshape-DevtBWtf.js +10 -0
  258. package/dist/rope-B5UUMsPi.js +32 -0
  259. package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
  260. package/dist/selu_util-D1w6yyTO.js +303 -0
  261. package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
  262. package/dist/shared-BuAXb4CI.js +2145 -0
  263. package/dist/sin-BGfy2HZo.js +16 -0
  264. package/dist/slice-D_gkkqZK.js +13 -0
  265. package/dist/slice_util-DtEldBfK.js +261 -0
  266. package/dist/softmax-ZHVebtR1.js +13 -0
  267. package/dist/split-DrfihRpZ.js +10 -0
  268. package/dist/squeeze-DZEpeblb.js +11 -0
  269. package/dist/stack-yOIAalTq.js +13 -0
  270. package/dist/sum-_fzj5ZTB.js +12 -0
  271. package/dist/tensor-DdQUJZlz.js +909 -0
  272. package/dist/tensor-f35l8Odg.js +8 -0
  273. package/dist/tensor1d-CeZuc-Rv.js +12 -0
  274. package/dist/tensor2d-G4Ys2GxX.js +15 -0
  275. package/dist/tensor4d-B8roDgtc.js +15 -0
  276. package/dist/tensor_util-DV-FP5Q3.js +523 -0
  277. package/dist/tfjs_backend-kNyO5L2d.js +653 -0
  278. package/dist/tile-BzyEiF-F.js +13 -0
  279. package/dist/tokeniser/CharTokeniser.js +1 -1
  280. package/dist/tokeniser/bpe.js +1 -1
  281. package/dist/training/Adam.d.ts +2 -1
  282. package/dist/training/Adam.js +12 -28
  283. package/dist/training/AdamExt.d.ts +1 -0
  284. package/dist/training/AdamExt.js +2 -2
  285. package/dist/training/DatasetBuilder.js +3 -20
  286. package/dist/training/FullTrainer.js +82 -64
  287. package/dist/training/Trainer.d.ts +11 -6
  288. package/dist/training/Trainer.js +51 -39
  289. package/dist/training/sparseCrossEntropy.js +3 -3
  290. package/dist/transpose-DKELTqhe.js +38 -0
  291. package/dist/utilities/arrayClose.js +7 -7
  292. package/dist/utilities/dummy.js +35 -27
  293. package/dist/utilities/multinomialCPU.js +2 -2
  294. package/dist/utilities/packed.d.ts +7 -0
  295. package/dist/utilities/packed.js +716 -0
  296. package/dist/utilities/performance.js +1 -1
  297. package/dist/utilities/profile.js +1 -1
  298. package/dist/utilities/safetensors.js +2 -2
  299. package/dist/utilities/sentences.d.ts +5 -0
  300. package/dist/utilities/sentences.js +41 -0
  301. package/dist/utilities/weights.js +2 -2
  302. package/dist/variable-Bhn5bHYv.js +7 -0
  303. package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
  304. package/dist/webgpu_util-BBCnKm2X.js +65 -0
  305. package/dist/zeros-2gldETuK.js +14 -0
  306. package/package.json +4 -3
  307. package/dist/Reshape-Bowtk9BP.js +0 -127
  308. package/dist/Reshape-DUqYftGC.js +0 -30
  309. package/dist/backend_util-CJIiDoV1.js +0 -749
  310. package/dist/broadcast_to-DzlNweb8.js +0 -44
  311. package/dist/concat-B912vBbo.js +0 -33
  312. package/dist/dropout-C-csYCLj.js +0 -193
  313. package/dist/exports_initializers-B8iZMgQ0.js +0 -16
  314. package/dist/gather-Dnpgw-YQ.js +0 -25
  315. package/dist/index-BzFyqcy-.js +0 -4457
  316. package/dist/index-C1rx_Ajs.js +0 -12076
  317. package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
  318. package/dist/log_sum_exp-DO6z8tSE.js +0 -103
  319. package/dist/mat_mul-DzjTFx-u.js +0 -27
  320. package/dist/mod-Dobti4j4.js +0 -27
  321. package/dist/ones-tIJeHlq-.js +0 -29
  322. package/dist/ops/fusedSoftmax.d.ts +0 -2
  323. package/dist/ops/fusedSoftmax.js +0 -10
  324. package/dist/ops/grads/fusedSoftmax.js +0 -22
  325. package/dist/ops-LuCMAnmM.js +0 -1525
  326. package/dist/random_width-CXVRloNK.js +0 -13670
  327. package/dist/range-CWcz7xFA.js +0 -26
  328. package/dist/reciprocal-C4rNcM-S.js +0 -25
  329. package/dist/relu-BjCh_SYb.js +0 -25
  330. package/dist/reshape-CnIwVG1c.js +0 -25
  331. package/dist/selu_util-OtRzVwW5.js +0 -719
  332. package/dist/shared-DmRsFyaJ.js +0 -3134
  333. package/dist/sin-gpDNRxE0.js +0 -47
  334. package/dist/slice-d0Vo9XTN.js +0 -28
  335. package/dist/softmax-D7Jj3p_P.js +0 -28
  336. package/dist/split-DK2k5eHf.js +0 -25
  337. package/dist/stack-DFatutCx.js +0 -27
  338. package/dist/sum-CJ0ULhmt.js +0 -27
  339. package/dist/tensor1d-vML0r3q6.js +0 -27
  340. package/dist/tensor2d-D76QGjF3.js +0 -30
  341. package/dist/tensor4d-Df1WlVDY.js +0 -30
  342. package/dist/webgpu_util-pLEV9tks.js +0 -80
  343. package/dist/zeros-Bj5rMYA7.js +0 -52
package/dist/ops/cpu/gelu.js CHANGED
@@ -1,29 +1,30 @@
- import { f as t, t as d } from "../../index-BzFyqcy-.js";
+ import { t as d } from "../../index-ZyQhjEPo.js";
+ import { r } from "../../tensor_util-DV-FP5Q3.js";
  const o = 0.7978845608028654, c = 0.044715;
- function m(r) {
- const { inputs: u } = r, { x: n } = u, e = n;
+ function m(t) {
+ const { inputs: u } = t, { x: n } = u, e = n;
  return d(() => {
  const l = e.pow(3), s = e.add(l.mul(c)).mul(o).tanh().add(1).mul(0.5);
  return e.mul(s);
  });
  }
- const p = {
+ const N = {
  kernelName: "Gelu",
  backendName: "cpu",
  kernelFunc: m
  };
- t(p);
+ r(N);
  const K = {
  kernelName: "Gelu",
  backendName: "tensorflow",
  kernelFunc: m
  };
- t(K);
- function i(r) {
- const { dy: u, x: n } = r.inputs;
+ r(K);
+ function i(t) {
+ const { dy: u, x: n } = t.inputs;
  return d(() => {
- const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), N = g.add(G);
- return u.mul(N);
+ const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
+ return u.mul(p);
  });
  }
  const x = {
@@ -31,10 +32,10 @@ const x = {
  backendName: "cpu",
  kernelFunc: i
  };
- t(x);
+ r(x);
  const h = {
  kernelName: "GeluGrad",
  backendName: "tensorflow",
  kernelFunc: i
  };
- t(h);
+ r(h);
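
The hunk above is the tanh approximation of GELU: the constant `o` is √(2/π) ≈ 0.7978845608028654 and `c` is 0.044715. For reference, a de-minified sketch of the same forward pass (names are illustrative, not the package's own):

```ts
import { Tensor, tidy } from "@tensorflow/tfjs-core";

const SQRT_2_OVER_PI = 0.7978845608028654; // the constant `o` in the diff
const GELU_COEFF = 0.044715;               // the constant `c` in the diff

// gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
function geluTanh(x: Tensor): Tensor {
  return tidy(() => {
    const cube = x.pow(3);
    const gate = x.add(cube.mul(GELU_COEFF)).mul(SQRT_2_OVER_PI).tanh().add(1).mul(0.5);
    return x.mul(gate);
  });
}
```
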
package/dist/ops/cpu/matMul16.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/ops/cpu/matMul16.js ADDED
@@ -0,0 +1,16 @@
+ import { isPackedTensor as t } from "../../utilities/packed.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as p } from "../../tensor_util-DV-FP5Q3.js";
+ import { m } from "../../mat_mul-DeAh4uTH.js";
+ function l(r) {
+ const { A: e, B: n } = r.inputs, { transposeA: o, transposeB: s } = r.attrs, a = !t(e), c = !t(n);
+ if (a && c)
+ return m(e, n, o, s);
+ throw new Error("MatMul16 CPU kernel only supports packed tensors currently.");
+ }
+ const u = {
+ kernelName: "MatMul16",
+ backendName: "cpu",
+ kernelFunc: l
+ };
+ p(u);
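
Throughout these kernel files, the minified calls `r(...)`, `p(...)`, `t(...)` take a `{ kernelName, backendName, kernelFunc }` object, which matches tfjs-core's `registerKernel` API (presumably re-exported through the bundle chunks). A minimal sketch of the pattern, using a hypothetical kernel name to avoid clashing with the real one:

```ts
import { KernelConfig, Tensor, matMul, registerKernel } from "@tensorflow/tfjs-core";

const matMul16CpuDemo: KernelConfig = {
  kernelName: "MatMul16Demo", // hypothetical; the real kernel registers "MatMul16"
  backendName: "cpu",         // one config is registered per backend
  kernelFunc: ({ inputs }) => {
    const { A, B } = inputs as { A: Tensor; B: Tensor };
    return matMul(A, B); // delegate to the stock op, as the unpacked CPU path above does
  },
};
registerKernel(matMul16CpuDemo);
```
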
package/dist/ops/cpu/matMulGelu.js CHANGED
@@ -1,10 +1,12 @@
- import { f as e, t as m } from "../../index-BzFyqcy-.js";
- import { g as d, d as M } from "../../gelu-Bp_-935b.js";
+ import { t as m } from "../../index-ZyQhjEPo.js";
+ import { g as i, d as M } from "../../gelu-CNLFZWea.js";
+ import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+ import { m as k } from "../../mat_mul-DeAh4uTH.js";
  function c(t) {
- const { inputs: u } = t, { x: n, kernel: l } = u, a = n, r = l;
+ const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
  return m(() => {
- const o = a.matMul(r);
- return d(o);
+ const o = k(a, l);
+ return i(o);
  });
  }
  const G = {
@@ -13,23 +15,23 @@ const G = {
  kernelFunc: c
  };
  e(G);
- const i = {
+ const f = {
  kernelName: "MatMulGelu",
  backendName: "tensorflow",
  kernelFunc: c
  };
- e(i);
- const f = {
+ e(f);
+ const p = {
  kernelName: "MatMulGelu",
  backendName: "webgpu",
  kernelFunc: c
  };
- e(f);
+ e(p);
  function s(t) {
- const { dy: u, x: n, kernel: l } = t.inputs;
+ const { dy: u, x: n, kernel: r } = t.inputs;
  return m(() => {
- const a = n.matMul(l), r = M(u, a), o = r.matMul(l.transpose()), k = n.transpose().matMul(r);
- return [o, k];
+ const a = k(n, r), l = M(u, a), o = l.matMul(r.transpose()), d = n.transpose().matMul(l);
+ return [o, d];
  });
  }
  const g = {
@@ -38,15 +40,15 @@ const g = {
  kernelFunc: s
  };
  e(g);
- const p = {
+ const N = {
  kernelName: "MatMulGeluGrad",
  backendName: "tensorflow",
  kernelFunc: s
  };
- e(p);
- const N = {
+ e(N);
+ const b = {
  kernelName: "MatMulGeluGrad",
  backendName: "webgpu",
  kernelFunc: s
  };
- e(N);
+ e(b);
package/dist/ops/cpu/matMulMul.js CHANGED
@@ -1,20 +1,21 @@
- import { f as e, t as i } from "../../index-BzFyqcy-.js";
+ import { t as M } from "../../index-ZyQhjEPo.js";
+ import { r as e } from "../../tensor_util-DV-FP5Q3.js";
  function n(t) {
- const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
- return i(() => m.matMul(k, o, s).mul(M));
+ const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, i = a, k = c;
+ return M(() => m.matMul(i, o, s).mul(k));
  }
- const f = {
+ const p = {
  kernelName: "MatMulMul",
  backendName: "cpu",
  kernelFunc: n
  };
- e(f);
- const p = {
+ e(p);
+ const f = {
  kernelName: "MatMulMul",
  backendName: "tensorflow",
  kernelFunc: n
  };
- e(p);
+ e(f);
  const g = {
  kernelName: "MatMulMul",
  backendName: "webgpu",
package/dist/ops/cpu/mulDropout.js CHANGED
@@ -1,7 +1,8 @@
- import { f as e, a as t } from "../../index-BzFyqcy-.js";
+ import { m as u } from "../../index-ZyQhjEPo.js";
+ import { r as e } from "../../tensor_util-DV-FP5Q3.js";
  function n(o) {
- const { inputs: r } = o, { a: l, b: u } = r;
- return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
+ const { inputs: r } = o, { a: l, b: t } = r;
+ return console.warn("Using fallback mulDrop implementation without dropout."), u(l, t);
  }
  const a = {
  kernelName: "MulDropout",
package/dist/ops/cpu/normRMS.js CHANGED
@@ -1,9 +1,10 @@
- import { f as o, t as d } from "../../index-BzFyqcy-.js";
+ import { t as d } from "../../index-ZyQhjEPo.js";
+ import { r as a } from "../../tensor_util-DV-FP5Q3.js";
  function i(t) {
- const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
+ const { inputs: e } = t, { x: n, gamma: s } = e, r = n, m = s;
  return d(() => {
  const u = r.square().mean(-1, !0).add(1e-8).rsqrt();
- return r.mul(u).mul(a);
+ return r.mul(u).mul(m);
  });
  }
  const k = {
@@ -11,18 +12,18 @@ const k = {
  backendName: "cpu",
  kernelFunc: i
  };
- o(k);
+ a(k);
  const g = {
  kernelName: "RMSNorm",
  backendName: "tensorflow",
  kernelFunc: i
  };
- o(g);
+ a(g);
  function N(t) {
  const { dy: e, x: n, gamma: s } = t.inputs;
  return d(() => {
- const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
- return [c.mul(m).sub(n.mul(f).mul(m).div(a.add(1e-8))), l];
+ const r = n.shape[n.shape.length - 1], m = n.square().mean(-1, !0), o = m.add(1e-8).rsqrt(), u = n.mul(o), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
+ return [c.mul(o).sub(n.mul(f).mul(o).div(m.add(1e-8))), l];
  });
  }
  const S = {
@@ -30,10 +31,10 @@ const S = {
  backendName: "cpu",
  kernelFunc: N
  };
- o(S);
- const R = {
+ a(S);
+ const p = {
  kernelName: "RMSNormGrad",
  backendName: "tensorflow",
  kernelFunc: N
  };
- o(R);
+ a(p);
package/dist/ops/cpu/qkv.js CHANGED
@@ -1,25 +1,29 @@
- import { f as q } from "../../index-BzFyqcy-.js";
- import { r as o } from "../../reshape-CnIwVG1c.js";
- import { s as x } from "../../split-DK2k5eHf.js";
+ import "../../index-ZyQhjEPo.js";
+ import { r as q } from "../../tensor_util-DV-FP5Q3.js";
+ import { r as o } from "../../reshape-DevtBWtf.js";
+ import { s as x } from "../../split-DrfihRpZ.js";
  function v(p) {
- const { x: c, kernel: K } = p.inputs, { heads: n } = p.attrs, [s, e, t] = c.shape, a = o(c, [s * e, t]), i = a.dot(K);
+ const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
+ if (C)
+ throw new Error("QKV CPU implementation does not support packed tensors.");
+ const [s, e, t] = c.shape, a = o(c, [s * e, t]), i = a.dot(K);
  a.dispose();
  const d = o(i, [s, e, 3 * t]);
  i.dispose();
- const [k, l, m] = x(d, 3, -1);
+ const [k, m, l] = x(d, 3, -1);
  d.dispose();
  const r = t / n, f = o(k, [s, e, n, r]);
  k.dispose();
- const C = f.transpose([0, 2, 1, 3]);
+ const w = f.transpose([0, 2, 1, 3]);
  f.dispose();
- const h = o(l, [s, e, n, r]);
- l.dispose();
+ const h = o(m, [s, e, n, r]);
+ m.dispose();
  const N = h.transpose([0, 2, 1, 3]);
  h.dispose();
- const u = o(m, [s, e, n, r]);
- m.dispose();
+ const u = o(l, [s, e, n, r]);
+ l.dispose();
  const T = u.transpose([0, 2, 1, 3]);
- return u.dispose(), [C, N, T];
+ return u.dispose(), [w, N, T];
  }
  const F = {
  kernelName: "QKV",
@@ -27,12 +31,12 @@ const F = {
  kernelFunc: v
  };
  q(F);
- const R = {
+ const Q = {
  kernelName: "QKV",
  backendName: "tensorflow",
  kernelFunc: v
  };
- q(R);
+ q(Q);
  export {
  v as qkvCPU
  };
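
The QKV kernel fuses the input projection with the head split. A de-minified sketch of its shape flow (B = batch, T = sequence length, C = model width, H = heads; names are illustrative):

```ts
import { Tensor, matMul, reshape, split, tidy, transpose } from "@tensorflow/tfjs-core";

// x: [B, T, C], kernel: [C, 3C] → q, k, v each [B, H, T, C/H].
function qkv(x: Tensor, kernel: Tensor, heads: number): [Tensor, Tensor, Tensor] {
  return tidy(() => {
    const [B, T, C] = x.shape;
    const proj = matMul(reshape(x, [B * T, C]), kernel);          // [B*T, 3C]
    const [q, k, v] = split(reshape(proj, [B, T, 3 * C]), 3, -1); // each [B, T, C]
    const toHeads = (t: Tensor) =>
      transpose(reshape(t, [B, T, heads, C / heads]), [0, 2, 1, 3]);
    return [toHeads(q), toHeads(k), toHeads(v)] as [Tensor, Tensor, Tensor];
  });
}
```
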
package/dist/ops/cpu/rope.js CHANGED
@@ -1,37 +1,38 @@
- import { f as S } from "../../index-BzFyqcy-.js";
- import { r as F } from "../../range-CWcz7xFA.js";
- import { g as I } from "../../gather-Dnpgw-YQ.js";
- import { s as E } from "../../stack-DFatutCx.js";
- import { c as T } from "../../concat-B912vBbo.js";
- function U(t, c, p, o, r) {
- const n = o.shape[3], s = p;
- if (s > n) return o;
- const e = o.shape[2], i = s / 2, a = c.slice([r, 0, 0], [e, i, 1]).reshape([1, 1, e, i]), d = t.slice([r, 0, 0], [e, i, 1]).reshape([1, 1, e, i]), l = o.shape[0], m = o.shape[1], h = F(0, s, 2, "int32"), k = F(1, s, 2, "int32"), D = ((R) => {
- const g = R.slice([0, 0, 0, 0], [l, m, e, s]), v = s < n ? R.slice([0, 0, 0, s], [l, m, e, n - s]) : null, u = I(g, h, 3), f = I(g, k, 3), C = u.mul(a), N = f.mul(d), P = C.sub(N), b = f.mul(a), x = u.mul(d), K = b.add(x);
- u.dispose(), f.dispose(), a.dispose(), d.dispose(), C.dispose(), N.dispose(), b.dispose(), x.dispose();
- const w = E([P, K], -1);
- P.dispose(), K.dispose();
- const y = w.reshape([l, m, e, s]);
- return w.dispose(), v ? T([y, v], 3) : y;
- })(o);
- return h.dispose(), k.dispose(), D;
+ import "../../index-ZyQhjEPo.js";
+ import { r as I } from "../../tensor_util-DV-FP5Q3.js";
+ import { r as y } from "../../range-BMS52eQi.js";
+ import { g as F } from "../../gather-DykLGqmW.js";
+ import { s as E } from "../../stack-yOIAalTq.js";
+ import { c as T } from "../../concat-BHlIJeyT.js";
+ function U(c, r, p, e, n) {
+ const t = e.shape[3], s = p;
+ if (s > t) return e;
+ const o = e.shape[2], i = s / 2, a = r.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), d = c.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), l = e.shape[0], m = e.shape[1], h = y(0, s, 2, "int32"), g = y(1, s, 2, "int32"), D = ((k) => {
+ const C = k.slice([0, 0, 0, 0], [l, m, o, s]), R = s < t ? k.slice([0, 0, 0, s], [l, m, o, t - s]) : null, u = F(C, h, 3), f = F(C, g, 3), v = u.mul(a), N = f.mul(d), S = v.sub(N), P = f.mul(a), b = u.mul(d), x = P.add(b);
+ u.dispose(), f.dispose(), a.dispose(), d.dispose(), v.dispose(), N.dispose(), P.dispose(), b.dispose();
+ const K = E([S, x], -1);
+ S.dispose(), x.dispose();
+ const w = K.reshape([l, m, o, s]);
+ return K.dispose(), R ? T([w, R], 3) : w;
+ })(e);
+ return h.dispose(), g.dispose(), D;
  }
- function B(t) {
- const { x: c, sin: p, cos: o } = t.inputs, { pastLen: r } = t.attrs, n = c.shape[3];
- return U(p, o, n, c, r);
+ function B(c) {
+ const { x: r } = c.inputs, { pastLen: p, negSin: e, ropeCache: n } = c.attrs, t = r.shape[3], s = e ? n.getNegSin() : n.getSin(), o = n.getCos();
+ return U(s, o, t, r, p);
  }
  const j = {
  kernelName: "Rope",
  backendName: "cpu",
  kernelFunc: B
  };
- S(j);
+ I(j);
  const z = {
  kernelName: "Rope",
  backendName: "tensorflow",
  kernelFunc: B
  };
- S(z);
+ I(z);
  export {
  U as applyRoPE,
  B as ropeCPU
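
Two things change here: the kernel now reads its tables from a `ropeCache` attribute (with a `negSin` variant for the inverse rotation) instead of taking `sin`/`cos` tensor inputs, while the rotation itself is unchanged. In `applyRoPE`, `a` is the cos slice and `d` the sin slice, and the gathered even/odd feature pairs are rotated in the standard RoPE fashion:

$$x'_{2i} = x_{2i}\cos\theta_i - x_{2i+1}\sin\theta_i, \qquad x'_{2i+1} = x_{2i+1}\cos\theta_i + x_{2i}\sin\theta_i$$
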
package/dist/ops/cpu/scatterSub.js CHANGED
@@ -1,39 +1,25 @@
- import { E as f, M as g, F as r, G as l, a7 as N, f as b, c as S, a as h } from "../../index-BzFyqcy-.js";
- import { v as D } from "../../scatter_nd_util-BQdz--Gn.js";
- import { r as k } from "../../range-CWcz7xFA.js";
- import { s as v } from "../../stack-DFatutCx.js";
- import { o as E } from "../../ones-tIJeHlq-.js";
- /**
- * @license
- * Copyright 2018 Google LLC. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
- function I(a, e, s) {
- g(s);
- const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
+ import { A as f, B as c, E as g, c as l, m as N } from "../../index-ZyQhjEPo.js";
+ import { j as b, r as S } from "../../tensor_util-DV-FP5Q3.js";
+ import { d as h } from "../../tensor-DdQUJZlz.js";
+ import { v as D } from "../../scatter_nd_util-5EL-8VAQ.js";
+ import { r as k } from "../../range-BMS52eQi.js";
+ import { s as v } from "../../stack-yOIAalTq.js";
+ import { o as E } from "../../ones-CAMiP4I2.js";
+ function I(r, e, s) {
+ h(s);
+ const n = c(r, "indices", "scatterND", "int32"), t = c(e, "updates", "scatterND");
  D(t, n, s);
- const c = { indices: n, updates: t }, o = { shape: s };
- return l.runKernel(N, c, o);
+ const o = { indices: n, updates: t }, a = { shape: s };
+ return g.runKernel(b, o, a);
  }
  const K = /* @__PURE__ */ f({ scatterND_: I });
- function L(a) {
- const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = K(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
- return h(p, m);
+ function L(r) {
+ const { logits: e, labels: s, dy: n } = r.inputs, t = s.shape[0], o = e.shape[1], a = k(0, t, 1, "int32"), i = v([a, s], 1), d = E([t]), p = K(i, d, [t, o]), u = l(e, p), m = n.reshape([t, 1]);
+ return N(u, m);
  }
  const T = {
  kernelName: "EfficientScatterSub",
  backendName: "cpu",
  kernelFunc: L
  };
- b(T);
+ S(T);
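
EfficientScatterSub builds a one-hot matrix by scattering ones at the `(row, label)` positions, subtracts it from the first input, and scales by the reshaped incoming gradient. This is the usual sparse softmax cross-entropy gradient, assuming the `logits` input already holds softmax probabilities $p$:

$$\frac{\partial L}{\partial z_{ij}} = \big(p_{ij} - \mathbf{1}[j = y_i]\big)\, dy_i$$
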
package/dist/ops/dot16.d.ts ADDED
@@ -0,0 +1,2 @@
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export declare function dot16(a: Tensor, b: Tensor, transposeA?: boolean, transposeB?: boolean): Tensor;
package/dist/ops/dot16.js ADDED
@@ -0,0 +1,42 @@
+ import { b as d } from "../matMul16--R5hOwDG.js";
+ import { transpose16 as w } from "./transpose16.js";
+ import { reshape16 as n } from "./reshape16.js";
+ import { isPackedTensor as p } from "../utilities/packed.js";
+ import { d as x } from "../tfjs_backend-kNyO5L2d.js";
+ function E(e, s, h = !1, c = !1) {
+ if (!p(e) && !p(s))
+ return x(e, s);
+ if (e.rank < 2 || s.rank < 2)
+ throw new Error(
+ `dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${s.shape}`
+ );
+ if (s.rank >= 3) {
+ const r = e.shape.slice(-1)[0], i = s.shape.slice(-2)[0];
+ if (r !== i)
+ throw new Error(
+ `If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${s.shape}`
+ );
+ }
+ if (!p(e) || !p(s))
+ throw new Error("dot16 requires both inputs to be packed Tensors.");
+ if (e.rank === 2 && s.rank === 2)
+ return d(e, s, h, c);
+ {
+ const r = e.shape.slice(), i = r.pop();
+ e = n(e, [-1, i]);
+ const a = s.shape.slice(), l = a.pop(), m = a.pop(), k = [...a, l], f = Array.from({ length: s.rank }, (o, t) => t === 0 ? s.rank - 2 : t <= s.rank - 2 ? t - 1 : t);
+ if (f.every((o, t) => o === t))
+ s = n(s, [m, -1]);
+ else {
+ const o = w(s, f);
+ s = n(o, [m, -1]), o.dispose();
+ }
+ const y = [...r, ...k], u = d(e, s, h, c);
+ e.dispose(), s.dispose();
+ const D = n(u, y);
+ return u.dispose(), D;
+ }
+ }
+ export {
+ E as dot16
+ };
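
Per the guard clauses above, `dot16` falls back to the ordinary dot product when neither operand is packed, so it can serve as a drop-in replacement. A usage sketch (the import path is an assumption, not confirmed by this diff; the fallback is assumed to be the stock dot, which the error messages mirror):

```ts
import { tensor2d } from "@tensorflow/tfjs-core";
import { dot16 } from "@genai-fi/nanogpt"; // assumed re-export; adjust to the real entry point

const a = tensor2d([[1, 2], [3, 4]]);
const b = tensor2d([[5, 6], [7, 8]]);

// Neither input is packed, so this falls through to the standard fp32 path.
dot16(a, b).print(); // expected [[19, 22], [43, 50]]
```

With packed (fp16) inputs the same call instead dispatches to the `matMul16` kernel, reshaping and transposing as needed for rank > 2 operands.
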
package/dist/ops/gatherSub.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-BzFyqcy-.js";
+ import { e as n } from "../index-ZyQhjEPo.js";
  import "./cpu/gatherSub.js";
  import "./webgl/gatherSub.js";
  function f(r, e, t) {
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
- import "../index-BzFyqcy-.js";
+ import "../index-ZyQhjEPo.js";
  import "./cpu/gelu.js";
  import "./webgl/gelu.js";
- import { d as e, g as i } from "../gelu-Bp_-935b.js";
+ import { d as e, g as i } from "../gelu-CNLFZWea.js";
  export {
  e as dGelu,
  i as gelu
package/dist/ops/grads/add16.d.ts ADDED
@@ -0,0 +1 @@
+ export {};
package/dist/ops/grads/add16.js ADDED
@@ -0,0 +1,27 @@
+ import { j as u, q as d } from "../../index-ZyQhjEPo.js";
+ import { sum16 as p } from "../sum16.js";
+ import { reshape16 as c } from "../reshape16.js";
+ import { a as h } from "../../tensor_util-DV-FP5Q3.js";
+ const m = {
+ kernelName: "Add16",
+ inputsToSave: ["a", "b"],
+ gradFunc: (s, i) => {
+ const [t, a] = i, n = u(t.shape, a.shape);
+ if (Array.isArray(s))
+ throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
+ return { a: () => {
+ let e = s;
+ const r = d(t.shape, n);
+ r.length > 0 && (e = p(e, r));
+ const o = c(e, t.shape);
+ return e.dispose(), o;
+ }, b: () => {
+ let e = s;
+ const r = d(a.shape, n);
+ r.length > 0 && (e = p(e, r));
+ const o = c(e, a.shape);
+ return e.dispose(), o;
+ } };
+ }
+ };
+ h(m);
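
The two closures implement the standard gradient rule for a broadcast add: `dy` is summed over the broadcast axes, then reshaped back to each input's shape (here via `sum16`/`reshape16` so the gradient stays packed). The minified `u`/`d` imports appear to be tfjs's broadcast-shape helpers; the equivalent logic with the public fp32 API:

```ts
import { Tensor, backend_util, reshape, sum } from "@tensorflow/tfjs-core";

// Gradient of a broadcast `a + b` w.r.t. one input of shape inputShape,
// where the forward output had shape outShape.
function broadcastAddGrad(dy: Tensor, inputShape: number[], outShape: number[]): Tensor {
  const axes = backend_util.getReductionAxes(inputShape, outShape);
  const reduced = axes.length > 0 ? sum(dy, axes) : dy;
  return reshape(reduced, inputShape);
}
```
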
package/dist/ops/grads/attentionMask.js CHANGED
@@ -1,29 +1,22 @@
- import { i as m, b as i } from "../../index-BzFyqcy-.js";
- import { matMulMul as u } from "../matMulMul.js";
- const p = {
+ import "../../index-ZyQhjEPo.js";
+ import { m as o } from "../../matMul16--R5hOwDG.js";
+ import { transpose16 as m } from "../transpose16.js";
+ import { a as c } from "../../tensor_util-DV-FP5Q3.js";
+ const l = {
  kernelName: "AttentionMask",
  inputsToSave: ["q", "k"],
  outputsToSave: [],
- gradFunc: (t, c, l) => {
- if (Array.isArray(t))
+ gradFunc: (r, s, n) => {
+ if (Array.isArray(r))
  throw new Error("Expected dy to be a single Tensor");
- const [e, n] = c, { divisor: a } = l;
+ const [a, i] = s, { divisor: e } = n;
  return {
- q: () => u(t, n, i(a)),
+ q: () => o(r, i, e),
  k: () => {
- const s = e.transpose([0, 1, 3, 2]), r = u(s, t, i(a));
- s.dispose();
- const o = r.transpose([0, 1, 3, 2]);
- return r.dispose(), o;
- },
- mask: () => t,
- divisor: () => {
- const s = e.matMul(n, !1, !0), r = t.mul(s);
- s.dispose();
- const o = r.sum();
- return r.dispose(), o;
+ const t = o(a, r, e, !0, !1), u = m(t, [0, 1, 3, 2]);
+ return t.dispose(), u;
  }
  };
  }
  };
- m(p);
+ c(l);
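
The rewritten config drops the old `mask` and `divisor` branches and computes both gradients with the packed `matMul16`. Assuming AttentionMask produces $S = QK^{\top}/d$ (consistent with the removed `divisor` branch), the closures match

$$\frac{\partial L}{\partial Q} = \frac{1}{d}\,\frac{\partial L}{\partial S}\,K, \qquad \frac{\partial L}{\partial K} = \frac{1}{d}\left(\frac{\partial L}{\partial S}\right)^{\top} Q = \left(\frac{1}{d}\,Q^{\top}\,\frac{\partial L}{\partial S}\right)^{\top}$$

which is why the `k` closure calls `matMul16` with `transposeA = true` and then `transpose16` on the result.
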
package/dist/ops/grads/gelu.js CHANGED
@@ -1,5 +1,6 @@
- import "../../index-BzFyqcy-.js";
- import { a as e } from "../../gelu-Bp_-935b.js";
+ import "../../index-ZyQhjEPo.js";
+ import { a as m } from "../../gelu-CNLFZWea.js";
+ import "../../tensor_util-DV-FP5Q3.js";
  export {
- e as geluGradConfig
+ m as geluGradConfig
  };
package/dist/ops/grads/matMul16.d.ts ADDED
@@ -0,0 +1,2 @@
+ import { GradConfig } from '@tensorflow/tfjs-core';
+ export declare const matMul16GradConfig: GradConfig;
package/dist/ops/grads/matMul16.js ADDED
@@ -0,0 +1,9 @@
+ import "../../index-ZyQhjEPo.js";
+ import { a as f } from "../../matMul16--R5hOwDG.js";
+ import "../../gelu-CNLFZWea.js";
+ import "../transpose16.js";
+ import "../reshape16.js";
+ import "../../tensor_util-DV-FP5Q3.js";
+ export {
+ f as matMul16GradConfig
+ };
package/dist/ops/grads/matMulGelu.js CHANGED
@@ -1,17 +1,18 @@
- import { i as a, e as o } from "../../index-BzFyqcy-.js";
- function i(e, n, r) {
- return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
+ import { e as l } from "../../index-ZyQhjEPo.js";
+ import { a as o } from "../../tensor_util-DV-FP5Q3.js";
+ function i(e, r, n) {
+ return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
  }
  const s = {
  kernelName: "MatMulGelu",
  inputsToSave: ["x", "kernel"],
  outputsToSave: [],
- gradFunc: (e, n) => {
- const [r, t] = n, [u, l] = i(e, r, t);
+ gradFunc: (e, r) => {
+ const [n, t] = r, [u, a] = i(e, n, t);
  return {
  x: () => u,
- kernel: () => l
+ kernel: () => a
  };
  }
  };
- a(s);
+ o(s);
package/dist/ops/grads/normRMS.js CHANGED
@@ -1,20 +1,21 @@
- import { i as t, e as g } from "../../index-BzFyqcy-.js";
- function i(r, a, n) {
- return g().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
+ import { e as t } from "../../index-ZyQhjEPo.js";
+ import { a as g } from "../../tensor_util-DV-FP5Q3.js";
+ function i(r, a, m) {
+ return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
  }
  const s = {
  kernelName: "RMSNorm",
  inputsToSave: ["x", "gamma"],
  outputsToSave: [],
  gradFunc: (r, a) => {
- const [n, e] = a, [m, o] = i(r, n, e);
+ const [m, n] = a, [o, e] = i(r, m, n);
  return {
- x: () => m,
- gamma: () => o
+ x: () => o,
+ gamma: () => e
  };
  }
  };
- t(s);
+ g(s);
  export {
  s as normRMSGradConfig
  };
package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} RENAMED
@@ -1,2 +1,2 @@
  import { GradConfig } from '@tensorflow/tfjs-core';
- export declare const softmaxGradConfig: GradConfig;
+ export declare const packGradConfig: GradConfig;