@genai-fi/nanogpt 0.9.1 → 0.10.1

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (343)
  1. package/README.md +352 -14
  2. package/dist/Generator.js +69 -78
  3. package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
  4. package/dist/Reshape-CF6odzV4.js +16 -0
  5. package/dist/Reshape-_kILl6tK.js +81 -0
  6. package/dist/TeachableLLM.js +28 -22
  7. package/dist/Trainer.d.ts +2 -0
  8. package/dist/Trainer.js +3 -2
  9. package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
  10. package/dist/backend.d.ts +2 -1
  11. package/dist/backend.js +10 -4
  12. package/dist/backend_util-D-rUb2ty.js +474 -0
  13. package/dist/backend_webgpu-B0u2ndUn.js +547 -0
  14. package/dist/binary_op_util-pKXltfxI.js +192 -0
  15. package/dist/broadcast_to-CwF7XIeu.js +30 -0
  16. package/dist/checks/appendCache.js +2 -2
  17. package/dist/checks/attentionMask.js +3 -3
  18. package/dist/checks/check.d.ts +1 -1
  19. package/dist/checks/check.js +8 -8
  20. package/dist/checks/gelu.js +2 -2
  21. package/dist/checks/index.d.ts +2 -0
  22. package/dist/checks/index.js +7 -5
  23. package/dist/checks/matMulGelu.js +6 -6
  24. package/dist/checks/normRMS.js +7 -7
  25. package/dist/checks/normRMSGrad.js +3 -3
  26. package/dist/checks/packUnpack.d.ts +1 -0
  27. package/dist/checks/packUnpack.js +18 -0
  28. package/dist/checks/qkv.js +12 -27
  29. package/dist/checks/rope.js +2 -2
  30. package/dist/checks/weights.js +18 -16
  31. package/dist/complex-CSlYz-2T.js +13 -0
  32. package/dist/complex_util-Yc1A_gV1.js +55 -0
  33. package/dist/concat-BHlIJeyT.js +19 -0
  34. package/dist/concat_util-DcJk7YHS.js +22 -0
  35. package/dist/data/docx.js +1 -1
  36. package/dist/data/parquet.js +2 -2
  37. package/dist/data/pdf.js +1 -1
  38. package/dist/data/textLoader.js +1 -1
  39. package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
  40. package/dist/dropout-C1pM3f11.js +99 -0
  41. package/dist/expand_dims-BPG4fwBP.js +13 -0
  42. package/dist/exports_initializers-xuidcwI4.js +7 -0
  43. package/dist/gather-DykLGqmW.js +10 -0
  44. package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
  45. package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
  46. package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
  47. package/dist/index-CjOj7j-u.js +7308 -0
  48. package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
  49. package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
  50. package/dist/index-ZyQhjEPo.js +2157 -0
  51. package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
  52. package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
  53. package/dist/layers/BaseLayer.d.ts +1 -0
  54. package/dist/layers/BaseLayer.js +7 -6
  55. package/dist/layers/CausalSelfAttention.d.ts +0 -1
  56. package/dist/layers/CausalSelfAttention.js +56 -55
  57. package/dist/layers/MLP.js +15 -16
  58. package/dist/layers/PositionEmbedding.js +5 -14
  59. package/dist/layers/RMSNorm.js +3 -3
  60. package/dist/layers/RoPECache.d.ts +2 -0
  61. package/dist/layers/RoPECache.js +22 -17
  62. package/dist/layers/TiedEmbedding.js +22 -17
  63. package/dist/layers/TransformerBlock.js +21 -20
  64. package/dist/loader/load.js +1 -1
  65. package/dist/loader/loadTransformers.js +1 -1
  66. package/dist/loader/oldZipLoad.js +39 -33
  67. package/dist/loader/save.js +1 -1
  68. package/dist/log_sum_exp-DWI-76TI.js +41 -0
  69. package/dist/main.d.ts +8 -0
  70. package/dist/main.js +63 -52
  71. package/dist/matMul16--R5hOwDG.js +77 -0
  72. package/dist/mat_mul-DeAh4uTH.js +12 -0
  73. package/dist/mod-Gt1rMB4n.js +12 -0
  74. package/dist/models/NanoGPTV1.js +40 -31
  75. package/dist/models/model.d.ts +2 -0
  76. package/dist/models/model.js +37 -29
  77. package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
  78. package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
  79. package/dist/ones-CAMiP4I2.js +15 -0
  80. package/dist/ops/adamAdjust.js +1 -1
  81. package/dist/ops/adamMoments.d.ts +1 -1
  82. package/dist/ops/adamMoments.js +4 -4
  83. package/dist/ops/add16.d.ts +2 -0
  84. package/dist/ops/add16.js +9 -0
  85. package/dist/ops/appendCache.js +16 -9
  86. package/dist/ops/attentionMask.js +4 -4
  87. package/dist/ops/concat16.d.ts +2 -0
  88. package/dist/ops/concat16.js +9 -0
  89. package/dist/ops/cpu/adamAdjust.js +14 -13
  90. package/dist/ops/cpu/adamMoments.js +10 -9
  91. package/dist/ops/cpu/appendCache.js +9 -8
  92. package/dist/ops/cpu/attentionMask.js +15 -14
  93. package/dist/ops/cpu/fusedSoftmax.js +13 -12
  94. package/dist/ops/cpu/gatherSub.js +9 -24
  95. package/dist/ops/cpu/gelu.js +13 -12
  96. package/dist/ops/cpu/matMul16.d.ts +1 -0
  97. package/dist/ops/cpu/matMul16.js +16 -0
  98. package/dist/ops/cpu/matMulGelu.js +18 -16
  99. package/dist/ops/cpu/matMulMul.js +8 -7
  100. package/dist/ops/cpu/mulDropout.js +4 -3
  101. package/dist/ops/cpu/normRMS.js +11 -10
  102. package/dist/ops/cpu/qkv.js +17 -13
  103. package/dist/ops/cpu/rope.js +23 -22
  104. package/dist/ops/cpu/scatterSub.js +16 -30
  105. package/dist/ops/dot16.d.ts +2 -0
  106. package/dist/ops/dot16.js +42 -0
  107. package/dist/ops/gatherSub.js +1 -1
  108. package/dist/ops/gelu.js +2 -2
  109. package/dist/ops/grads/add16.d.ts +1 -0
  110. package/dist/ops/grads/add16.js +27 -0
  111. package/dist/ops/grads/attentionMask.js +12 -19
  112. package/dist/ops/grads/gelu.js +4 -3
  113. package/dist/ops/grads/matMul16.d.ts +2 -0
  114. package/dist/ops/grads/matMul16.js +9 -0
  115. package/dist/ops/grads/matMulGelu.js +8 -7
  116. package/dist/ops/grads/normRMS.js +8 -7
  117. package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
  118. package/dist/ops/grads/pack16.js +7 -0
  119. package/dist/ops/grads/qkv.d.ts +3 -1
  120. package/dist/ops/grads/qkv.js +28 -22
  121. package/dist/ops/grads/rope.d.ts +2 -1
  122. package/dist/ops/grads/rope.js +6 -13
  123. package/dist/ops/grads/softmax16.d.ts +2 -0
  124. package/dist/ops/grads/softmax16.js +26 -0
  125. package/dist/ops/grads/unpack16.d.ts +2 -0
  126. package/dist/ops/grads/unpack16.js +6 -0
  127. package/dist/ops/grads/utils.d.ts +3 -0
  128. package/dist/ops/grads/utils.js +10 -0
  129. package/dist/ops/matMul16.d.ts +15 -0
  130. package/dist/ops/matMul16.js +13 -0
  131. package/dist/ops/matMulGelu.js +1 -1
  132. package/dist/ops/matMulMul.js +1 -1
  133. package/dist/ops/mul16.d.ts +2 -0
  134. package/dist/ops/mul16.js +8 -0
  135. package/dist/ops/mulDrop.js +1 -1
  136. package/dist/ops/normRMS.js +1 -1
  137. package/dist/ops/pack16.d.ts +2 -0
  138. package/dist/ops/pack16.js +6 -0
  139. package/dist/ops/qkv.d.ts +1 -1
  140. package/dist/ops/qkv.js +8 -4
  141. package/dist/ops/reshape16.d.ts +2 -0
  142. package/dist/ops/reshape16.js +43 -0
  143. package/dist/ops/rope.d.ts +1 -1
  144. package/dist/ops/rope.js +8 -10
  145. package/dist/ops/scatterSub.js +1 -1
  146. package/dist/ops/slice16.d.ts +2 -0
  147. package/dist/ops/slice16.js +9 -0
  148. package/dist/ops/softmax16.d.ts +2 -0
  149. package/dist/ops/softmax16.js +12 -0
  150. package/dist/ops/sub16.d.ts +2 -0
  151. package/dist/ops/sub16.js +8 -0
  152. package/dist/ops/sum16.d.ts +2 -0
  153. package/dist/ops/sum16.js +13 -0
  154. package/dist/ops/transpose16.d.ts +3 -0
  155. package/dist/ops/transpose16.js +41 -0
  156. package/dist/ops/unpack16.d.ts +2 -0
  157. package/dist/ops/unpack16.js +6 -0
  158. package/dist/ops/webgl/adamAdjust.js +3 -2
  159. package/dist/ops/webgl/adamMoments.js +2 -1
  160. package/dist/ops/webgl/appendCache.js +2 -1
  161. package/dist/ops/webgl/attentionMask.js +5 -4
  162. package/dist/ops/webgl/fusedSoftmax.js +6 -4
  163. package/dist/ops/webgl/gatherSub.js +7 -6
  164. package/dist/ops/webgl/gelu.js +3 -2
  165. package/dist/ops/webgl/log.js +12 -27
  166. package/dist/ops/webgl/matMul16.d.ts +1 -0
  167. package/dist/ops/webgl/matMul16.js +37 -0
  168. package/dist/ops/webgl/matMulGelu.js +17 -15
  169. package/dist/ops/webgl/matMulMul.js +13 -12
  170. package/dist/ops/webgl/mulDropout.js +9 -8
  171. package/dist/ops/webgl/normRMS.js +8 -7
  172. package/dist/ops/webgl/qkv.js +6 -5
  173. package/dist/ops/webgl/rope.js +11 -10
  174. package/dist/ops/webgl/scatterSub.js +6 -5
  175. package/dist/ops/webgpu/adamAdjust.js +12 -10
  176. package/dist/ops/webgpu/adamMoments.js +27 -22
  177. package/dist/ops/webgpu/add16.d.ts +1 -0
  178. package/dist/ops/webgpu/add16.js +14 -0
  179. package/dist/ops/webgpu/appendCache.js +64 -17
  180. package/dist/ops/webgpu/attentionMask.js +19 -62
  181. package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
  182. package/dist/ops/webgpu/attentionMask32_program.js +54 -0
  183. package/dist/ops/webgpu/concat16.d.ts +19 -0
  184. package/dist/ops/webgpu/concat16.js +128 -0
  185. package/dist/ops/webgpu/gatherSub.js +9 -7
  186. package/dist/ops/webgpu/gelu.js +78 -31
  187. package/dist/ops/webgpu/index.js +12 -0
  188. package/dist/ops/webgpu/matMul16.d.ts +1 -0
  189. package/dist/ops/webgpu/matMul16.js +58 -0
  190. package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
  191. package/dist/ops/webgpu/matMul16_program.js +336 -0
  192. package/dist/ops/webgpu/mul16.d.ts +1 -0
  193. package/dist/ops/webgpu/mul16.js +14 -0
  194. package/dist/ops/webgpu/normRMS.js +21 -40
  195. package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
  196. package/dist/ops/webgpu/normRMS16_program.js +24 -0
  197. package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
  198. package/dist/ops/webgpu/normRMS32_program.js +24 -0
  199. package/dist/ops/webgpu/normRMSGrad.js +113 -64
  200. package/dist/ops/webgpu/pack16.d.ts +1 -0
  201. package/dist/ops/webgpu/pack16.js +19 -0
  202. package/dist/ops/webgpu/pack16_program.d.ts +19 -0
  203. package/dist/ops/webgpu/pack16_program.js +92 -0
  204. package/dist/ops/webgpu/qkv.js +20 -55
  205. package/dist/ops/webgpu/rope.js +77 -22
  206. package/dist/ops/webgpu/scatterSub.js +9 -7
  207. package/dist/ops/webgpu/slice16.d.ts +7 -0
  208. package/dist/ops/webgpu/slice16.js +71 -0
  209. package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
  210. package/dist/ops/webgpu/softmax16.js +23 -0
  211. package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
  212. package/dist/ops/webgpu/softmax16_program.js +73 -0
  213. package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
  214. package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
  215. package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
  216. package/dist/ops/webgpu/softmax16grad.js +38 -0
  217. package/dist/ops/webgpu/sub16.d.ts +1 -0
  218. package/dist/ops/webgpu/sub16.js +14 -0
  219. package/dist/ops/webgpu/sum16.d.ts +1 -0
  220. package/dist/ops/webgpu/sum16.js +40 -0
  221. package/dist/ops/webgpu/transpose16.d.ts +1 -0
  222. package/dist/ops/webgpu/transpose16.js +35 -0
  223. package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
  224. package/dist/ops/webgpu/transpose16_program.js +50 -0
  225. package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
  226. package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
  227. package/dist/ops/webgpu/unpack16.d.ts +1 -0
  228. package/dist/ops/webgpu/unpack16.js +49 -0
  229. package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
  230. package/dist/ops/webgpu/utils/binary_op.js +79 -0
  231. package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
  232. package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
  233. package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
  234. package/dist/ops/webgpu/utils/reductions.js +236 -45
  235. package/dist/ops-CNI3TwqM.js +645 -0
  236. package/dist/pack16-CFUqumar.js +41 -0
  237. package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
  238. package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
  239. package/dist/patches/PackedTensor.d.ts +12 -0
  240. package/dist/patches/PackedTensor.js +11 -0
  241. package/dist/patches/engine.d.ts +261 -0
  242. package/dist/patches/engine.js +10 -0
  243. package/dist/patches/tape.d.ts +12 -0
  244. package/dist/patches/tape.js +5 -0
  245. package/dist/patches/webgpu_backend.d.ts +18 -0
  246. package/dist/patches/webgpu_backend.js +57 -0
  247. package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
  248. package/dist/patches/webgpu_base.js +34 -0
  249. package/dist/patches/webgpu_program.d.ts +36 -0
  250. package/dist/patches/webgpu_program.js +401 -0
  251. package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
  252. package/dist/random_width-DY6Kk2Dl.js +10051 -0
  253. package/dist/range-BMS52eQi.js +11 -0
  254. package/dist/reciprocal-CTmshQ9J.js +10 -0
  255. package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
  256. package/dist/relu-yZ2-7WxU.js +10 -0
  257. package/dist/reshape-DevtBWtf.js +10 -0
  258. package/dist/rope-B5UUMsPi.js +32 -0
  259. package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
  260. package/dist/selu_util-D1w6yyTO.js +303 -0
  261. package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
  262. package/dist/shared-BuAXb4CI.js +2145 -0
  263. package/dist/sin-BGfy2HZo.js +16 -0
  264. package/dist/slice-D_gkkqZK.js +13 -0
  265. package/dist/slice_util-DtEldBfK.js +261 -0
  266. package/dist/softmax-ZHVebtR1.js +13 -0
  267. package/dist/split-DrfihRpZ.js +10 -0
  268. package/dist/squeeze-DZEpeblb.js +11 -0
  269. package/dist/stack-yOIAalTq.js +13 -0
  270. package/dist/sum-_fzj5ZTB.js +12 -0
  271. package/dist/tensor-DdQUJZlz.js +909 -0
  272. package/dist/tensor-f35l8Odg.js +8 -0
  273. package/dist/tensor1d-CeZuc-Rv.js +12 -0
  274. package/dist/tensor2d-G4Ys2GxX.js +15 -0
  275. package/dist/tensor4d-B8roDgtc.js +15 -0
  276. package/dist/tensor_util-DV-FP5Q3.js +523 -0
  277. package/dist/tfjs_backend-kNyO5L2d.js +653 -0
  278. package/dist/tile-BzyEiF-F.js +13 -0
  279. package/dist/tokeniser/CharTokeniser.js +1 -1
  280. package/dist/tokeniser/bpe.js +1 -1
  281. package/dist/training/Adam.d.ts +2 -1
  282. package/dist/training/Adam.js +12 -28
  283. package/dist/training/AdamExt.d.ts +1 -0
  284. package/dist/training/AdamExt.js +2 -2
  285. package/dist/training/DatasetBuilder.js +3 -20
  286. package/dist/training/FullTrainer.js +55 -48
  287. package/dist/training/Trainer.d.ts +11 -6
  288. package/dist/training/Trainer.js +51 -39
  289. package/dist/training/sparseCrossEntropy.js +3 -3
  290. package/dist/transpose-DKELTqhe.js +38 -0
  291. package/dist/utilities/arrayClose.js +7 -7
  292. package/dist/utilities/dummy.js +35 -27
  293. package/dist/utilities/multinomialCPU.js +2 -2
  294. package/dist/utilities/packed.d.ts +7 -0
  295. package/dist/utilities/packed.js +716 -0
  296. package/dist/utilities/performance.js +1 -1
  297. package/dist/utilities/profile.js +1 -1
  298. package/dist/utilities/safetensors.js +2 -2
  299. package/dist/utilities/sentences.d.ts +5 -0
  300. package/dist/utilities/sentences.js +41 -0
  301. package/dist/utilities/weights.js +2 -2
  302. package/dist/variable-Bhn5bHYv.js +7 -0
  303. package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
  304. package/dist/webgpu_util-BBCnKm2X.js +65 -0
  305. package/dist/zeros-2gldETuK.js +14 -0
  306. package/package.json +4 -3
  307. package/dist/Reshape-Bowtk9BP.js +0 -127
  308. package/dist/Reshape-DUqYftGC.js +0 -30
  309. package/dist/backend_util-CJIiDoV1.js +0 -749
  310. package/dist/broadcast_to-DzlNweb8.js +0 -44
  311. package/dist/concat-B912vBbo.js +0 -33
  312. package/dist/dropout-C-csYCLj.js +0 -193
  313. package/dist/exports_initializers-B8iZMgQ0.js +0 -16
  314. package/dist/gather-Dnpgw-YQ.js +0 -25
  315. package/dist/index-BzFyqcy-.js +0 -4457
  316. package/dist/index-C1rx_Ajs.js +0 -12076
  317. package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
  318. package/dist/log_sum_exp-DO6z8tSE.js +0 -103
  319. package/dist/mat_mul-DzjTFx-u.js +0 -27
  320. package/dist/mod-Dobti4j4.js +0 -27
  321. package/dist/ones-tIJeHlq-.js +0 -29
  322. package/dist/ops/fusedSoftmax.d.ts +0 -2
  323. package/dist/ops/fusedSoftmax.js +0 -10
  324. package/dist/ops/grads/fusedSoftmax.js +0 -22
  325. package/dist/ops-LuCMAnmM.js +0 -1525
  326. package/dist/random_width-CXVRloNK.js +0 -13670
  327. package/dist/range-CWcz7xFA.js +0 -26
  328. package/dist/reciprocal-C4rNcM-S.js +0 -25
  329. package/dist/relu-BjCh_SYb.js +0 -25
  330. package/dist/reshape-CnIwVG1c.js +0 -25
  331. package/dist/selu_util-OtRzVwW5.js +0 -719
  332. package/dist/shared-DmRsFyaJ.js +0 -3134
  333. package/dist/sin-gpDNRxE0.js +0 -47
  334. package/dist/slice-d0Vo9XTN.js +0 -28
  335. package/dist/softmax-D7Jj3p_P.js +0 -28
  336. package/dist/split-DK2k5eHf.js +0 -25
  337. package/dist/stack-DFatutCx.js +0 -27
  338. package/dist/sum-CJ0ULhmt.js +0 -27
  339. package/dist/tensor1d-vML0r3q6.js +0 -27
  340. package/dist/tensor2d-D76QGjF3.js +0 -30
  341. package/dist/tensor4d-Df1WlVDY.js +0 -30
  342. package/dist/webgpu_util-pLEV9tks.js +0 -80
  343. package/dist/zeros-Bj5rMYA7.js +0 -52
package/dist/ops/webgpu/matMul16_program.js
@@ -0,0 +1,336 @@
+ import "../../index-ZyQhjEPo.js";
+ import { e as h } from "../../webgpu_program-Cigz-7RF.js";
+ import { s as f } from "../../tensor-DdQUJZlz.js";
+ class A {
+ variableNames = ["A", "B"];
+ outputShape;
+ shaderKey = "MatMul16TB";
+ dispatchLayout;
+ dispatch;
+ workgroupSize = [8, 8, 1];
+ // 8x8 threads for 32x32 tile
+ dimInner;
+ transposeA = !1;
+ transposeB = !0;
+ broadcastBatch = !0;
+ tileInner = 32;
+ uniforms;
+ scale = !1;
+ scaleA = !1;
+ scaleB = !1;
+ activation;
+ causalMask = !1;
+ outputComponent;
+ variableComponents;
+ outputIndexSnippet;
+ outputStrideSnippet;
+ constructor(e, t, o, a, i, r = !1, s = !1) {
+ if (this.transposeA = r, this.transposeB = s, this.variableComponents = [2, 2], this.outputComponent = 2, this.shaderKey = `MatMul16TB_${t}_${o}_${a}_${i}_${r ? "TA" : ""}${s ? "TB" : ""}`, r) {
+ if (this.outputShape = [e, a, i / 2], this.dimInner = t, t !== o)
+ throw new Error("Inner dimensions of A and B must match for MatMul16 transposeA");
+ } else if (s) {
+ if (this.outputShape = [e, t, o / 2], this.dimInner = i, i !== a)
+ throw new Error("Inner dimensions of A and B must match for MatMul16 transposeB");
+ } else if (this.outputShape = [e, t, i / 2], this.dimInner = a, a !== o)
+ throw new Error("Inner dimensions of A and B must match for MatMul16");
+ if (this.dimInner % this.tileInner !== 0)
+ throw new Error(`Inner dimension ${this.dimInner} must be multiple of ${this.tileInner}`);
+ if (this.dispatchLayout = { x: [2], y: [1], z: [0] }, this.dispatch = [
+ Math.ceil(this.outputShape[2] / (this.workgroupSize[0] * 2)),
+ // 4 unpacked cols per thread = 2 packed cols
+ Math.ceil(this.outputShape[1] / (this.workgroupSize[1] * 4)),
+ // 4 rows per thread
+ this.outputShape[0]
+ ], i % 32 !== 0)
+ throw new Error("Head size must be even for MatMul16 transposeB");
+ if (a % 32 !== 0)
+ throw new Error("Head size must be even for MatMul16 transposeB");
+ if (t % 32 !== 0)
+ throw new Error("Sequence length must be multiple of 32 for MatMul16 transposeB");
+ if (o % 32 !== 0)
+ throw new Error("Sequence length must be multiple of 32 for MatMul16 transposeB");
+ this.outputIndexSnippet = "var idx0 = getOutputIndexFromCoords(vec3<i32>(batch, gRow, gColPacked));", this.outputStrideSnippet = "idx0 = idx0 + uniforms.outShapeStrides[1]; // Next row";
+ }
+ addUniform(e) {
+ this.uniforms ? this.uniforms += `, ${e}` : this.uniforms = e;
+ }
+ /* Note: this is done after constructor because it shouldn't affect dispatch */
+ setOutputShape(e, t) {
+ const o = f(e), a = f(this.outputShape);
+ if (o !== a)
+ throw new Error(`New shape size ${o} must match current size ${a}`);
+ function i(c, u) {
+ return [`${c} / ${u}`, `${c} % ${u}`];
+ }
+ const r = this.outputShape;
+ let s = [];
+ if (e.length === r.length + 1)
+ if (e[0] * e[1] === r[0])
+ s = [
+ ...i("batch", e[1]),
+ // batch / B2, batch % B2
+ "gRow",
+ "gColPacked"
+ ], this.shaderKey += `_batchSplit_${e[1]}`;
+ else if (e[e.length - 2] * e[e.length - 1] === r[r.length - 1])
+ s = [
+ "batch",
+ "gRow",
+ ...i("gColPacked", e[e.length - 1])
+ // gColPacked / N2, gColPacked % N2
+ ], this.shaderKey += `_colSplit_${e[e.length - 1]}`;
+ else
+ throw new Error("Unsupported output shape split");
+ else if (e.length === r.length)
+ s = ["batch", "gRow", "gColPacked"];
+ else if (e.length === 2 && r[0] === 1)
+ s = ["gRow", "gColPacked"], this.shaderKey += "_batchRemoved";
+ else
+ throw new Error(`Unsupported output shape rank change: ${r.length} -> ${e.length}}`);
+ let n = [];
+ if (t) {
+ if (t.length !== e.length)
+ throw new Error("Permutation length must match output rank");
+ n = t.map((c) => s[c]), this.shaderKey += `_perm_${t.join("")}`;
+ } else
+ n = s;
+ const l = n.findIndex((c) => c === "gRow"), p = `vec${e.length}<i32>(${n.join(", ")})`;
+ this.outputIndexSnippet = `var idx0: i32 = getOutputIndexFromCoords(${p});`, this.outputStrideSnippet = `idx0 = idx0 + uniforms.outShapeStrides${l === 0 ? "" : `[${l}]`}; `, t ? this.outputShape = t.map((c) => e[c]) : this.outputShape = e;
+ }
+ useScale() {
+ this.addUniform("scale: f32"), this.scale = !0, this.shaderKey += "_scaled";
+ }
+ useScaleA() {
+ this.addUniform("scaleA: f32"), this.scaleA = !0, this.shaderKey += "_scaledA";
+ }
+ useScaleB() {
+ this.addUniform("scaleB: f32"), this.scaleB = !0, this.shaderKey += "_scaledB";
+ }
+ useActivation(e) {
+ this.activation = e, this.shaderKey += `_${e}`;
+ }
+ useCausalMask() {
+ this.causalMask = !0, this.addUniform("pastLen: i32"), this.shaderKey += "_causalMask";
+ }
+ activationSnippet() {
+ return this.activation === "gelu" ? `
+ // TODO: revisit after https://github.com/gpuweb/gpuweb/issues/4458 is resolved
+ fn tanhComplete(x: vec4<f32>) -> vec4<f32> {
+ return vec4<f32>(
+ select(tanh(x.x), sign(x.x), abs(x.x) > 15.0f),
+ select(tanh(x.y), sign(x.y), abs(x.y) > 15.0f),
+ select(tanh(x.z), sign(x.z), abs(x.z) > 15.0f),
+ select(tanh(x.w), sign(x.w), abs(x.w) > 15.0f),
+ );
+ }
+ fn activation(x : vec4<f32>) -> vec4<f32> {
+ let x3 = x * x * x;
+ var inner = fma(vec4<f32>(${0.044715}f), x3, x);
+ inner = ${0.7978845608028654}f * inner;
+ inner = tanhComplete(inner);
+ inner = 0.5f * (1.0f + inner);
+ return x * inner;
+ }
+ ` : "";
+ }
+ /* Transpose when writing to shared memory */
+ readASnippet() {
+ const e = `
+ var col = i32(localId.x);
+ var row = i32(localId.y) * 4;
+ var packedA: vec2<i32> = A[offsetA + row * strideA + col];
+ var Arow1 = vec4<f32>(
+ unpack2x16float(u32(packedA.x)),
+ unpack2x16float(u32(packedA.y))
+ );
+ packedA = A[offsetA + (row + 1) * strideA + col];
+ var Arow2 = vec4<f32>(
+ unpack2x16float(u32(packedA.x)),
+ unpack2x16float(u32(packedA.y))
+ );
+ packedA = A[offsetA + (row + 2) * strideA + col];
+ var Arow3 = vec4<f32>(
+ unpack2x16float(u32(packedA.x)),
+ unpack2x16float(u32(packedA.y))
+ );
+ packedA = A[offsetA + (row + 3) * strideA + col];
+ var Arow4 = vec4<f32>(
+ unpack2x16float(u32(packedA.x)),
+ unpack2x16float(u32(packedA.y))
+ );
+
+ ${this.scaleA ? "Arow1 = Arow1 * uniforms.scaleA;" : ""}
+ ${this.scaleA ? "Arow2 = Arow2 * uniforms.scaleA;" : ""}
+ ${this.scaleA ? "Arow3 = Arow3 * uniforms.scaleA;" : ""}
+ ${this.scaleA ? "Arow4 = Arow4 * uniforms.scaleA;" : ""}
+ `;
+ return this.transposeA ? `{
+ ${e}
+ mm_Asub[row][col] = Arow1;
+ mm_Asub[row + 1][col] = Arow2;
+ mm_Asub[row + 2][col] = Arow3;
+ mm_Asub[row + 3][col] = Arow4;
+ }` : `{
+ ${e}
+
+ col = i32(localId.x) * 4;
+ row = i32(localId.y);
+
+ mm_Asub[col][row] = vec4<f32>(Arow1.x, Arow2.x, Arow3.x, Arow4.x);
+ mm_Asub[col + 1][row] = vec4<f32>(Arow1.y, Arow2.y, Arow3.y, Arow4.y);
+ mm_Asub[col + 2][row] = vec4<f32>(Arow1.z, Arow2.z, Arow3.z, Arow4.z);
+ mm_Asub[col + 3][row] = vec4<f32>(Arow1.w, Arow2.w, Arow3.w, Arow4.w);
+ }`;
+ }
+ /* Transpose when writing to shared memory */
+ readBSnippet() {
+ const e = `
+ var col = i32(localId.x);
+ var row = i32(localId.y) * 4;
+ var packedB: vec2<i32> = B[offsetB + row * strideB + col];
+ var Brow1 = vec4<f32>(
+ unpack2x16float(u32(packedB.x)),
+ unpack2x16float(u32(packedB.y))
+ );
+ packedB = B[offsetB + (row + 1) * strideB + col];
+ var Brow2 = vec4<f32>(
+ unpack2x16float(u32(packedB.x)),
+ unpack2x16float(u32(packedB.y))
+ );
+ packedB = B[offsetB + (row + 2) * strideB + col];
+ var Brow3 = vec4<f32>(
+ unpack2x16float(u32(packedB.x)),
+ unpack2x16float(u32(packedB.y))
+ );
+ packedB = B[offsetB + (row + 3) * strideB + col];
+ var Brow4 = vec4<f32>(
+ unpack2x16float(u32(packedB.x)),
+ unpack2x16float(u32(packedB.y))
+ );
+
+ ${this.scaleB ? "Brow1 = Brow1 * uniforms.scaleB;" : ""}
+ ${this.scaleB ? "Brow2 = Brow2 * uniforms.scaleB;" : ""}
+ ${this.scaleB ? "Brow3 = Brow3 * uniforms.scaleB;" : ""}
+ ${this.scaleB ? "Brow4 = Brow4 * uniforms.scaleB;" : ""}
+ `;
+ return this.transposeB ? `{
+ ${e}
+
+ col = i32(localId.x) * 4;
+ row = i32(localId.y);
+
+ mm_Bsub[col][row] = vec4<f32>(Brow1.x, Brow2.x, Brow3.x, Brow4.x);
+ mm_Bsub[col + 1][row] = vec4<f32>(Brow1.y, Brow2.y, Brow3.y, Brow4.y);
+ mm_Bsub[col + 2][row] = vec4<f32>(Brow1.z, Brow2.z, Brow3.z, Brow4.z);
+ mm_Bsub[col + 3][row] = vec4<f32>(Brow1.w, Brow2.w, Brow3.w, Brow4.w);
+ }` : `{
+ ${e}
+ mm_Bsub[row][col] = Brow1;
+ mm_Bsub[row + 1][col] = Brow2;
+ mm_Bsub[row + 2][col] = Brow3;
+ mm_Bsub[row + 3][col] = Brow4;
+ }`;
+ }
+ baseIndexSnippets() {
+ const e = `
+ let strideA = uniforms.aShape.z / 2;
+ let strideB = uniforms.bShape.z / 2;
+ `;
+ let t = "";
+ this.transposeB ? t = "let baseB = getIndexFromCoords3D(vec3<i32>(batchB, globalColStart, 0), vec3<i32>(uniforms.bShape.x, uniforms.bShape.y, strideB));" : t = "let baseB = getIndexFromCoords3D(vec3<i32>(batchB, 0, globalColStart / 4), vec3<i32>(uniforms.bShape.x, uniforms.bShape.y, strideB));";
+ let o = "";
+ return this.transposeA ? o = "let baseA = getIndexFromCoords3D(vec3<i32>(batchA, 0, globalRowStart / 4), vec3<i32>(uniforms.aShape.x, uniforms.aShape.y, strideA));" : o = "let baseA = getIndexFromCoords3D(vec3<i32>(batchA, globalRowStart, 0), vec3<i32>(uniforms.aShape.x, uniforms.aShape.y, strideA));", `
+ ${e}
+ ${o}
+ ${t}
+ `;
+ }
+ offsetSnippets() {
+ let e = "";
+ this.transposeA ? e = "let offsetA = baseA + kStart * strideA;" : e = "let offsetA = baseA + kStart / 4;";
+ let t = "";
+ return this.transposeB ? t = "let offsetB = baseB + kStart / 4;" : t = "let offsetB = baseB + kStart * strideB;", `
+ ${e}
+ ${t}
+ `;
+ }
+ getUserCode() {
+ const e = this.transposeA, t = this.tileInner, o = this.workgroupSize[1] * 4, a = this.workgroupSize[0] * 4, i = e ? o : t, r = e ? t : o, s = this.dimInner, n = Math.ceil(s / t);
+ return `
+ var<workgroup> mm_Asub : array<array<vec4<f32>, ${i / 4 + (this.transposeA ? 0 : 1)}>, ${r}>;
+ var<workgroup> mm_Bsub : array<array<vec4<f32>, ${a / 4 + (this.transposeB ? 1 : 0)}>, ${t}>;
+
+ ${this.activation ? this.activationSnippet() : ""}
+
+ ${h()} {
+ let batch = i32(globalId.z);
+ let batchA = ${this.broadcastBatch ? "batch % uniforms.aShape[0]" : "batch"};
+ let batchB = ${this.broadcastBatch ? "batch % uniforms.bShape[0]" : "batch"};
+ var kStart = 0;
+ let localRow = i32(localId.y);
+ let localCol = i32(localId.x);
+ let globalRowStart = i32(workgroupId.y) * ${o};
+ let globalColStart = i32(workgroupId.x) * ${a};
+
+ // 4 rows x 4 cols accumulator
+ // acc[i] holds row i (4 cols)
+ var acc = array<vec4<f32>, 4>(
+ vec4<f32>(0.0), vec4<f32>(0.0), vec4<f32>(0.0), vec4<f32>(0.0)
+ );
+
+ ${this.baseIndexSnippets()}
+
+ for (var t = 0; t < ${n}; t++) {
+ ${this.offsetSnippets()}
+
+ ${this.readASnippet()}
+ ${this.readBSnippet()}
+
+ kStart = kStart + ${t};
+ workgroupBarrier();
+
+ for (var k = 0; k < ${t}; k++) {
+ // Load 4 columns of B as a vec4
+ let bVec = mm_Bsub[k][localCol];
+ let aVec = mm_Asub[k][localRow];
+
+ // Compute 4 rows
+ for (var r = 0; r < 4; r = r + 1) {
+ acc[r] = fma(vec4<f32>(aVec[r]), bVec, acc[r]);
+ }
+ }
+ workgroupBarrier();
+ }
+
+ // Write out 4 rows x 2 packed cols (4 unpacked cols)
+ let gRow = globalRowStart + localRow * 4;
+ let gColPacked = i32(workgroupId.x) * ${this.workgroupSize[0] * 2} + localCol * 2;
+
+ ${this.outputIndexSnippet}
+ for (var i = 0; i < 4; i = i + 1) {
+ ${this.scale ? "acc[i] = acc[i] * uniforms.scale;" : ""}
+
+ ${this.causalMask ? `
+ // Causal Masking: mask if col > row + pastLen
+ let r = gRow + i;
+ let cBase = gColPacked * 2;
+ let cVec = vec4<i32>(cBase, cBase + 1, cBase + 2, cBase + 3);
+ let mask = cVec > vec4<i32>(r + uniforms.pastLen);
+ acc[i] = select(acc[i], vec4<f32>(-uniforms.INFINITY), mask);
+ ` : ""}
+
+ ${this.activation ? "acc[i] = activation(acc[i]);" : ""}
+ result[idx0 / 2] = vec2<i32>(
+ i32(pack2x16float(acc[i].xy)),
+ i32(pack2x16float(acc[i].zw))
+ );
+
+ ${this.outputStrideSnippet}
+ }
+ }
+ `;
+ }
+ }
+ export {
+ A as default
+ };
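For orientation, the minified class above (the file's default export) is a tiled, fp16-packed matmul program with optional transposes, scaling, a fused GELU epilogue, and causal masking. The following is a hypothetical usage sketch only: the class name `MatMul16Program` and the parameter roles are inferred from the constructor's shape checks (row, column, and inner dimensions must all be multiples of 32, and the packed output stores two f16 values per i32), not taken from documented API.

```ts
// Hypothetical sketch; names, import path, and parameter roles are inferred from the diff above.
import MatMul16Program from '@genai-fi/nanogpt/dist/ops/webgpu/matMul16_program.js';

const batch = 1, M = 64, K = 64, N = 128;          // all multiples of 32
// A is [batch, M, K], B is [batch, K, N], both stored as packed fp16 (two halves per i32).
const program = new MatMul16Program(batch, M, K, K, N, /* transposeA */ false, /* transposeB */ false);

program.useScale();            // adds a `scale: f32` uniform applied to the accumulator
program.useCausalMask();       // adds `pastLen: i32` and writes -INFINITY above the diagonal
program.useActivation('gelu'); // fuses the tanh-based GELU into the write-out loop

// The program would then be dispatched through the WebGPU backend, producing an int32
// tensor of shape [batch, M, N / 2] that downstream ops interpret as packed fp16, e.g.
// backend.runWebGPUProgram(program, [a, b], 'int32', uniformData);
```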
package/dist/ops/webgpu/mul16.d.ts
@@ -0,0 +1 @@
+ export {};
package/dist/ops/webgpu/mul16.js
@@ -0,0 +1,14 @@
+ import "../../index-ZyQhjEPo.js";
+ import { BinaryOpProgram as m } from "./utils/binary_op.js";
+ import { B as p } from "../../binary_op_util-pKXltfxI.js";
+ import { r as c } from "../../tensor_util-DV-FP5Q3.js";
+ function i(r) {
+ const { a: e, b: n } = r.inputs, t = r.backend, a = new m(p.MUL, e.shape, n.shape), o = t.runWebGPUProgram(a, [e, n], "int32");
+ return o.packed = !0, o;
+ }
+ const s = {
+ kernelName: "Mul16",
+ backendName: "webgpu",
+ kernelFunc: i
+ };
+ c(s);
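Read past the minification, the Mul16 kernel above is a thin wrapper that runs a packed-fp16 element-wise multiply and registers it with the WebGPU backend. Here is a rough de-minified sketch of the same logic; the import paths and the `BinaryOpType` name are assumptions based on the bundled module names, and only the control flow is taken from the diff.

```ts
// De-minified sketch of dist/ops/webgpu/mul16.js; paths and names are illustrative only.
import { registerKernel, KernelConfig, KernelFunc, TensorInfo } from '@tensorflow/tfjs-core';
import { BinaryOpProgram } from './utils/binary_op';   // package-internal helper (see diff)
import { BinaryOpType } from './binary_op_util';       // assumed name for the bundled enum

const mul16: KernelFunc = ({ inputs, backend }) => {
  const { a, b } = inputs as { a: TensorInfo; b: TensorInfo };
  // Element-wise multiply over tensors stored as packed fp16 (two halves per int32).
  const program = new BinaryOpProgram(BinaryOpType.MUL, a.shape, b.shape);
  const out = (backend as any).runWebGPUProgram(program, [a, b], 'int32');
  (out as any).packed = true; // keep the packed-fp16 marker for downstream 16-bit ops
  return out;
};

const mul16Config: KernelConfig = {
  kernelName: 'Mul16',
  backendName: 'webgpu',
  kernelFunc: mul16,
};

registerKernel(mul16Config);
```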
package/dist/ops/webgpu/normRMS.js
@@ -1,49 +1,30 @@
- import { f as n } from "../../webgpu_util-pLEV9tks.js";
- import { f as p, a4 as h } from "../../index-BzFyqcy-.js";
- import { createReduceInfo as u, reduce as c, createReductionShader as m } from "./utils/reductions.js";
- class d {
- outputShape;
- shaderKey = "RMSNorm";
- dispatchLayout;
- dispatch;
- workgroupSize = [64, 1, 1];
- variableNames = ["x", "gamma"];
- uniforms = "reduceSize : i32,";
- inputShape;
- size = !0;
- constructor(e) {
- this.inputShape = [e.batchSize, e.inSize], this.outputShape = this.inputShape, this.dispatchLayout = n(this.outputShape), this.dispatch = [e.batchSize, 1, 1];
- }
- getUserCode() {
- const e = this.workgroupSize[0];
- return m(e, "mean", `
- candidate = candidate * candidate;
- `, `
- bestValue = inverseSqrt(bestValue + 1e-8);
- `, `
- let X = f32(x[offset + k]);
- let gamma = gamma[k];
- let normalized = X * bestValue;
- let outVal = normalized * gamma;
- result[offset + k] = f32(outVal);
- `);
- }
- }
- function S(a) {
- const { x: e, gamma: r } = a.inputs, o = a.backend, s = [e, r], t = u(s, -1), i = new d(t);
- if (h(r.shape, [e.shape[e.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
+ import "../../index-ZyQhjEPo.js";
+ import { createReduceInfo as g, reduce as l } from "./utils/reductions.js";
+ import { j as w } from "../../tensor-DdQUJZlz.js";
+ import { isPackedTensor as f } from "../../utilities/packed.js";
+ import { p as k } from "../../pack16-CFUqumar.js";
+ import S from "./normRMS16_program.js";
+ import z from "./normRMS32_program.js";
+ import N from "./utils/deviceInfo.js";
+ import { r as b } from "../../tensor_util-DV-FP5Q3.js";
+ function P(m) {
+ const { x: e, gamma: n } = m.inputs, c = m.backend, i = N(c), s = f(e), a = f(n), o = s || a, r = !o || s ? e : k(e), p = !o || a ? n : k(n), h = [r, p], t = g(h, -1), u = o ? new S(i, t) : new z(i, t);
+ if (w(p.shape, [r.shape[r.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
  throw new Error(`rmsNormGPU: input rank ${e.shape.length} not supported, only rank 3 is supported`);
- if (t.inSize !== e.shape[2])
- throw new Error(`rmsNormGPU: reduction size ${t.inSize} does not match expected size ${e.shape[2]}`);
+ if (t.inSize !== r.shape[r.shape.length - 1])
+ throw new Error(
+ `rmsNormGPU: reduction size ${t.inSize} does not match expected size ${r.shape[r.shape.length - 1]}`
+ );
  if (t.batchSize !== e.shape[0] * e.shape[1])
  throw new Error(
  `rmsNormGPU: batch size ${t.batchSize} does not match expected size ${e.shape[0] * e.shape[1]}`
  );
- return c(i, s, o);
+ const d = l(u, h, c);
+ return d.packed = o, o && !s && r.dispose(), o && !a && p.dispose(), d;
  }
- const l = {
+ const G = {
  kernelName: "RMSNorm",
  backendName: "webgpu",
- kernelFunc: S
+ kernelFunc: P
  };
- p(l);
+ b(G);
package/dist/ops/webgpu/normRMS16_program.d.ts
@@ -0,0 +1,9 @@
+ import { backend_util } from '@tensorflow/tfjs-core';
+ import { ReduceProgram } from './utils/reductions';
+ import { DeviceInformation } from './utils/deviceInfo';
+ export default class RMSProgram16 extends ReduceProgram {
+ constructor(deviceInfo: DeviceInformation, reduceInfo: backend_util.ReduceInfo);
+ getPreprocessSnippet(): string;
+ getPostprocessSnippet(): string;
+ getWriteSnippet(): string;
+ }
package/dist/ops/webgpu/normRMS16_program.js
@@ -0,0 +1,24 @@
+ import { ReduceProgram as a } from "./utils/reductions.js";
+ class o extends a {
+ constructor(e, t) {
+ super(e, t, { reductionOp: "mean", elementwise: !0 }, !0), this.shaderKey = "RMSNorm16", this.variableNames.push("gamma"), this.variableComponents = [1, 1];
+ }
+ getPreprocessSnippet() {
+ return "candidate = candidate * candidate;";
+ }
+ getPostprocessSnippet() {
+ return "bestValue = inverseSqrt(bestValue + 1e-8);";
+ }
+ getWriteSnippet() {
+ return `
+ let X = unpack2x16float(u32(x[offset + k]));
+ let gamma = unpack2x16float(u32(gamma[k]));
+ let normalized = X * bestValue;
+ let outVal = normalized * gamma;
+ result[offset + k] = i32(pack2x16float(outVal));
+ `;
+ }
+ }
+ export {
+ o as default
+ };
package/dist/ops/webgpu/normRMS32_program.d.ts
@@ -0,0 +1,9 @@
+ import { backend_util } from '@tensorflow/tfjs-core';
+ import { ReduceProgram } from './utils/reductions';
+ import { DeviceInformation } from './utils/deviceInfo';
+ export default class RMSProgram32 extends ReduceProgram {
+ constructor(deviceInfo: DeviceInformation, reduceInfo: backend_util.ReduceInfo);
+ protected getPreprocessSnippet(): string;
+ protected getPostprocessSnippet(): string;
+ protected getWriteSnippet(): string;
+ }
package/dist/ops/webgpu/normRMS32_program.js
@@ -0,0 +1,24 @@
+ import { ReduceProgram as a } from "./utils/reductions.js";
+ class o extends a {
+ constructor(e, t) {
+ super(e, t, { reductionOp: "mean", elementwise: !0 }, !1), this.shaderKey = "RMSNorm32", this.variableNames.push("gamma"), this.variableComponents = [1, 1];
+ }
+ getPreprocessSnippet() {
+ return "candidate = candidate * candidate;";
+ }
+ getPostprocessSnippet() {
+ return "bestValue = inverseSqrt(bestValue + 1e-8);";
+ }
+ getWriteSnippet() {
+ return `
+ let X = f32(x[offset + k]);
+ let gamma = gamma[k];
+ let normalized = X * bestValue;
+ let outVal = normalized * gamma;
+ result[offset + k] = f32(outVal);
+ `;
+ }
+ }
+ export {
+ o as default
+ };
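For reference, `RMSProgram16` and `RMSProgram32` implement the same reduction and differ only in their I/O: the 16-bit variant loads and stores packed fp16 via `unpack2x16float`/`pack2x16float`, while the 32-bit variant works on plain f32. The preprocess, postprocess, and write snippets above correspond to the usual RMSNorm over the last dimension of size d:

```latex
y_k = \gamma_k \cdot \frac{x_k}{\sqrt{\tfrac{1}{d}\sum_{j=1}^{d} x_j^2 + 10^{-8}}}
```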