@genai-fi/nanogpt 0.10.2 → 0.11.0

This diff compares the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (262)
  1. package/dist/Generator.d.ts +10 -5
  2. package/dist/Generator.js +11760 -146
  3. package/dist/{RealDiv-zz7FpkKX.js → RealDiv-Ds-jvL09.js} +28 -30
  4. package/dist/Reshape-Cd6e-Otn.js +14 -0
  5. package/dist/{Reshape-CHdUjC72.js → Reshape-Ct266DEk.js} +21 -23
  6. package/dist/TeachableLLM.d.ts +4 -3
  7. package/dist/TeachableLLM.js +15 -16
  8. package/dist/Trainer.d.ts +2 -2
  9. package/dist/Trainer.js +6 -6
  10. package/dist/{axis_util-BsIr9ZNu.js → axis_util-DofAuy0p.js} +1 -1
  11. package/dist/backend.js +2 -2
  12. package/dist/{backend_util-B1XRLuq9.js → backend_util-C7NWHpv7.js} +72 -73
  13. package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-B0Vls736.js} +52 -54
  14. package/dist/broadcast_to-DDaNMbX7.js +28 -0
  15. package/dist/checks/appendCache.js +2 -2
  16. package/dist/checks/attentionMask.js +3 -3
  17. package/dist/checks/gelu.js +2 -2
  18. package/dist/checks/matMulGelu.js +7 -11
  19. package/dist/checks/normRMS.js +9 -9
  20. package/dist/checks/normRMSGrad.js +3 -3
  21. package/dist/checks/packUnpack.js +2 -2
  22. package/dist/checks/qkv.js +11 -12
  23. package/dist/checks/rope.js +2 -2
  24. package/dist/clip_by_value-Dn5tzexi.js +12 -0
  25. package/dist/complex-DClmWqJt.js +11 -0
  26. package/dist/concat-C6X3AAlQ.js +17 -0
  27. package/dist/{concat_util-iBYIyuQe.js → concat_util-CHsJFZJJ.js} +1 -1
  28. package/dist/{dataset-D2P7rHAw.js → dataset-DcjWqUVQ.js} +135 -137
  29. package/dist/dropout-OxuaJz6z.js +92 -0
  30. package/dist/expand_dims-BzfJK2uc.js +11 -0
  31. package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-eS9QJ6ut.js} +1 -1
  32. package/dist/floor-DIb-lN_u.js +9 -0
  33. package/dist/gather-BcO5UQNJ.js +9 -0
  34. package/dist/{gelu-Bmhopi0J.js → gelu-DqTbCx5x.js} +10 -11
  35. package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-CJcbnKPC.js} +841 -1015
  36. package/dist/index-D0RBWjq8.js +3520 -0
  37. package/dist/{index-DRyE072i.js → index-Dj5TkmPY.js} +330 -331
  38. package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CSaumNDs.js} +132 -134
  39. package/dist/layers/BaseLayer.js +15 -16
  40. package/dist/layers/CausalSelfAttention.js +6 -6
  41. package/dist/layers/MLP.js +4 -4
  42. package/dist/layers/PositionEmbedding.js +7 -7
  43. package/dist/layers/RMSNorm.js +3 -3
  44. package/dist/layers/RoPECache.js +9 -9
  45. package/dist/layers/TiedEmbedding.js +6 -6
  46. package/dist/layers/TransformerBlock.js +1 -1
  47. package/dist/loader/loadTransformers.js +1 -1
  48. package/dist/loader/oldZipLoad.js +21 -22
  49. package/dist/log_sum_exp-VLZgbFAH.js +39 -0
  50. package/dist/main.d.ts +1 -1
  51. package/dist/main.js +49 -50
  52. package/dist/{matMul16-fEAJ4smh.js → matMul16-cDxwemKj.js} +14 -15
  53. package/dist/matMulGelu-B2s_80-H.js +163 -0
  54. package/dist/mat_mul-DxpNTCRz.js +11 -0
  55. package/dist/mod-PrOKlFxH.js +11 -0
  56. package/dist/models/NanoGPTV1.js +2 -2
  57. package/dist/models/model.js +13 -14
  58. package/dist/ones-BX_wEgzB.js +14 -0
  59. package/dist/ops/adamAdjust.js +1 -1
  60. package/dist/ops/adamMoments.js +1 -1
  61. package/dist/ops/add16.js +1 -1
  62. package/dist/ops/appendCache.js +3 -3
  63. package/dist/ops/attentionMask.js +1 -1
  64. package/dist/ops/concat16.js +2 -2
  65. package/dist/ops/cpu/adamAdjust.js +12 -13
  66. package/dist/ops/cpu/adamMoments.js +6 -7
  67. package/dist/ops/cpu/appendCache.js +7 -8
  68. package/dist/ops/cpu/attentionMask.js +11 -11
  69. package/dist/ops/cpu/fusedSoftmax.js +10 -11
  70. package/dist/ops/cpu/gatherSub.js +10 -11
  71. package/dist/ops/cpu/gelu.js +14 -15
  72. package/dist/ops/cpu/matMul16.js +6 -7
  73. package/dist/ops/cpu/matMulGelu.js +5 -6
  74. package/dist/ops/cpu/matMulMul.js +3 -4
  75. package/dist/ops/cpu/mulDropout.js +3 -4
  76. package/dist/ops/cpu/normRMS.js +11 -12
  77. package/dist/ops/cpu/qkv.js +8 -9
  78. package/dist/ops/cpu/rope.js +9 -10
  79. package/dist/ops/cpu/scatterSub.js +14 -16
  80. package/dist/ops/dot16.js +2 -2
  81. package/dist/ops/gatherSub.js +1 -1
  82. package/dist/ops/gelu.js +2 -2
  83. package/dist/ops/grads/add16.js +10 -11
  84. package/dist/ops/grads/attentionMask.js +5 -6
  85. package/dist/ops/grads/gelu.js +3 -4
  86. package/dist/ops/grads/matMul16.js +4 -5
  87. package/dist/ops/grads/matMulGelu.js +8 -9
  88. package/dist/ops/grads/normRMS.js +9 -10
  89. package/dist/ops/grads/pack16.js +4 -5
  90. package/dist/ops/grads/qkv.js +17 -19
  91. package/dist/ops/grads/rope.js +3 -5
  92. package/dist/ops/grads/softmax16.js +3 -4
  93. package/dist/ops/grads/unpack16.js +3 -4
  94. package/dist/ops/grads/utils.d.ts +1 -0
  95. package/dist/ops/grads/utils.js +8 -4
  96. package/dist/ops/matMul16.js +3 -3
  97. package/dist/ops/matMulGelu.js +2 -2
  98. package/dist/ops/matMulMul.js +1 -1
  99. package/dist/ops/mul16.js +1 -1
  100. package/dist/ops/mulDrop.js +1 -1
  101. package/dist/ops/normRMS.js +1 -1
  102. package/dist/ops/pack16.js +3 -4
  103. package/dist/ops/qkv.js +4 -8
  104. package/dist/ops/reshape16.js +16 -18
  105. package/dist/ops/rope.d.ts +1 -1
  106. package/dist/ops/rope.js +3 -8
  107. package/dist/ops/scatterSub.js +1 -1
  108. package/dist/ops/slice16.js +2 -2
  109. package/dist/ops/softmax16.js +5 -8
  110. package/dist/ops/sub16.js +1 -1
  111. package/dist/ops/sum16.js +2 -2
  112. package/dist/ops/transpose16.js +23 -24
  113. package/dist/ops/unpack16.js +2 -2
  114. package/dist/ops/webgl/adamAdjust.js +2 -3
  115. package/dist/ops/webgl/adamMoments.js +1 -2
  116. package/dist/ops/webgl/appendCache.js +1 -2
  117. package/dist/ops/webgl/attentionMask.js +5 -6
  118. package/dist/ops/webgl/fusedSoftmax.js +6 -8
  119. package/dist/ops/webgl/gatherSub.js +6 -7
  120. package/dist/ops/webgl/gelu.js +2 -3
  121. package/dist/ops/webgl/log.js +11 -12
  122. package/dist/ops/webgl/matMul16.js +15 -16
  123. package/dist/ops/webgl/matMulGelu.js +7 -111
  124. package/dist/ops/webgl/matMulMul.js +14 -15
  125. package/dist/ops/webgl/mulDropout.js +8 -9
  126. package/dist/ops/webgl/normRMS.js +7 -8
  127. package/dist/ops/webgl/qkv.js +5 -6
  128. package/dist/ops/webgl/rope.js +7 -8
  129. package/dist/ops/webgl/scatterSub.js +5 -6
  130. package/dist/ops/webgpu/adamAdjust.js +10 -12
  131. package/dist/ops/webgpu/adamMoments.js +8 -10
  132. package/dist/ops/webgpu/add16.js +8 -9
  133. package/dist/ops/webgpu/appendCache.js +23 -25
  134. package/dist/ops/webgpu/attentionMask.js +10 -12
  135. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  136. package/dist/ops/webgpu/concat16.js +12 -14
  137. package/dist/ops/webgpu/gatherSub.js +9 -11
  138. package/dist/ops/webgpu/gelu.js +28 -29
  139. package/dist/ops/webgpu/matMul16.js +26 -28
  140. package/dist/ops/webgpu/matMul16_program.js +4 -5
  141. package/dist/ops/webgpu/mul16.js +7 -8
  142. package/dist/ops/webgpu/normRMS.js +17 -19
  143. package/dist/ops/webgpu/normRMSGrad.js +21 -28
  144. package/dist/ops/webgpu/pack16.js +12 -13
  145. package/dist/ops/webgpu/pack16_program.js +2 -2
  146. package/dist/ops/webgpu/qkv.js +13 -15
  147. package/dist/ops/webgpu/rope.js +25 -27
  148. package/dist/ops/webgpu/scatterSub.js +7 -9
  149. package/dist/ops/webgpu/slice16.js +21 -23
  150. package/dist/ops/webgpu/softmax16.js +17 -19
  151. package/dist/ops/webgpu/softmax16_program.js +2 -2
  152. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  153. package/dist/ops/webgpu/softmax16grad.js +7 -8
  154. package/dist/ops/webgpu/sub16.js +8 -9
  155. package/dist/ops/webgpu/sum16.js +19 -21
  156. package/dist/ops/webgpu/transpose16.js +19 -20
  157. package/dist/ops/webgpu/transpose16_program.js +2 -2
  158. package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
  159. package/dist/ops/webgpu/unpack16.js +3 -4
  160. package/dist/ops/webgpu/utils/binary_op.js +7 -8
  161. package/dist/ops/webgpu/utils/reductions.js +14 -22
  162. package/dist/ops-FJapAPfm.js +476 -0
  163. package/dist/pack16-k4jq6aMX.js +39 -0
  164. package/dist/patches/webgpu_backend.js +19 -20
  165. package/dist/patches/webgpu_base.js +1 -1
  166. package/dist/patches/webgpu_program.js +15 -16
  167. package/dist/{random_width-BVV9HveY.js → random_width-UGQn4OWb.js} +2506 -2761
  168. package/dist/range-CuGvVN2c.js +10 -0
  169. package/dist/relu-Cf80uA2p.js +9 -0
  170. package/dist/reshape-CkjKPPqB.js +9 -0
  171. package/dist/resize_nearest_neighbor-DB8k9KN_.js +175 -0
  172. package/dist/rope-BmZmp9uP.js +24 -0
  173. package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-BY22Cc-C.js} +1 -1
  174. package/dist/selu_util-BuLbmbrl.js +44 -0
  175. package/dist/{shared-CHhxz-O5.js → shared-B7USJZgw.js} +1 -1
  176. package/dist/{shared-D2NP_CpY.js → shared-BQboIImQ.js} +379 -381
  177. package/dist/slice-Aqy7KbJh.js +12 -0
  178. package/dist/{slice_util-DyjSAD0u.js → slice_util-D8CQRenR.js} +7 -7
  179. package/dist/{softmax-C9JQEtnO.js → softmax-faLoUZVT.js} +4 -5
  180. package/dist/split-BNz5jcGc.js +9 -0
  181. package/dist/squeeze--YMgaAAf.js +10 -0
  182. package/dist/stack-WJK22CFn.js +11 -0
  183. package/dist/step-dXR33iOg.js +261 -0
  184. package/dist/sum-BdplSvq_.js +11 -0
  185. package/dist/{tensor-0r5yOo2R.js → tensor-BQqrDvpx.js} +1 -1
  186. package/dist/tensor1d-LxP9asMm.js +11 -0
  187. package/dist/{tensor2d-CSB4KOb0.js → tensor2d-BN1sSfQO.js} +6 -7
  188. package/dist/{tensor4d-D7bLqGqz.js → tensor4d-DVwr7pLF.js} +6 -7
  189. package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-Vi4JfLzT.js} +256 -265
  190. package/dist/tile-CvN_LyVr.js +11 -0
  191. package/dist/tokeniser/BaseTokeniser.d.ts +27 -0
  192. package/dist/tokeniser/BaseTokeniser.js +94 -0
  193. package/dist/tokeniser/CharTokeniser.d.ts +4 -3
  194. package/dist/tokeniser/CharTokeniser.js +46 -32
  195. package/dist/tokeniser/bpe.d.ts +4 -3
  196. package/dist/tokeniser/bpe.js +60 -45
  197. package/dist/tokeniser/type.d.ts +11 -0
  198. package/dist/training/Adam.js +2 -2
  199. package/dist/training/AdamExt.js +1 -1
  200. package/dist/training/DatasetBuilder.d.ts +2 -2
  201. package/dist/training/DatasetBuilder.js +32 -36
  202. package/dist/training/FullTrainer.js +1 -1
  203. package/dist/training/Trainer.d.ts +3 -3
  204. package/dist/training/Trainer.js +2 -2
  205. package/dist/training/sparseCrossEntropy.js +5 -5
  206. package/dist/transpose-JawVKyZy.js +36 -0
  207. package/dist/unsorted_segment_sum-LAbmE9G4.js +277 -0
  208. package/dist/utilities/dummy.js +3 -3
  209. package/dist/utilities/multinomialCPU.js +2 -2
  210. package/dist/utilities/packed.d.ts +1 -4
  211. package/dist/utilities/packed.js +10 -745
  212. package/dist/utilities/performance.js +1 -1
  213. package/dist/utilities/profile.js +1 -1
  214. package/dist/utilities/safetensors.js +2 -2
  215. package/dist/utilities/sentences.js +5 -5
  216. package/dist/utilities/weights.js +2 -2
  217. package/dist/{variable-DzfrwYuP.js → variable-DQ9yYgEU.js} +1 -1
  218. package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-CAE4RICo.js} +177 -171
  219. package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-BdovYhXr.js} +34 -35
  220. package/dist/zeros-DeiE2zTa.js +13 -0
  221. package/dist/zeros_like-BAz3iKru.js +721 -0
  222. package/package.json +4 -2
  223. package/dist/Reshape-CDVLyVfz.js +0 -16
  224. package/dist/broadcast_to-B0ChcDaz.js +0 -30
  225. package/dist/complex-BBiRlsVq.js +0 -13
  226. package/dist/concat-DmBLPVGC.js +0 -19
  227. package/dist/dropout-B1x1kYMa.js +0 -99
  228. package/dist/expand_dims-ouvfxQ1n.js +0 -13
  229. package/dist/gather-CH9sdacz.js +0 -10
  230. package/dist/index-D6Q1lPZO.js +0 -2157
  231. package/dist/log_sum_exp-D3ftBNY5.js +0 -41
  232. package/dist/mat_mul-C59XWcJd.js +0 -12
  233. package/dist/mod-DESSvHIU.js +0 -12
  234. package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
  235. package/dist/ones-jU9jlQvM.js +0 -15
  236. package/dist/ops-BFDtP6th.js +0 -645
  237. package/dist/pack16-CmVZs6af.js +0 -41
  238. package/dist/patches/PackedTensor.d.ts +0 -12
  239. package/dist/patches/PackedTensor.js +0 -11
  240. package/dist/patches/engine.d.ts +0 -261
  241. package/dist/patches/engine.js +0 -12
  242. package/dist/patches/tape.d.ts +0 -12
  243. package/dist/patches/tape.js +0 -5
  244. package/dist/range-ZZZD60Fx.js +0 -11
  245. package/dist/reciprocal-CrYlsAGD.js +0 -10
  246. package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
  247. package/dist/relu-BYDneVPn.js +0 -10
  248. package/dist/reshape-CaPQzFvz.js +0 -10
  249. package/dist/rope-s4W2XO9B.js +0 -32
  250. package/dist/selu_util-BGPXmd4B.js +0 -303
  251. package/dist/sin-Djs4aQiu.js +0 -16
  252. package/dist/slice-DvovR5wq.js +0 -13
  253. package/dist/split-DBck65sX.js +0 -10
  254. package/dist/squeeze-C00Ipm_7.js +0 -11
  255. package/dist/stack-ChnHwRpX.js +0 -13
  256. package/dist/sum-ywRJj3Zr.js +0 -12
  257. package/dist/tensor-CzmOBsdf.js +0 -909
  258. package/dist/tensor1d-BlUT89BP.js +0 -12
  259. package/dist/tensor_util-DfwaWayG.js +0 -523
  260. package/dist/tile-CR074jmp.js +0 -13
  261. package/dist/transpose-DH4gmHvu.js +0 -38
  262. package/dist/zeros-DBFVbpv5.js +0 -14
package/dist/ops/grads/matMulGelu.js CHANGED
@@ -1,18 +1,17 @@
-import { e as l } from "../../index-D6Q1lPZO.js";
-import { a as o } from "../../tensor_util-DfwaWayG.js";
-function i(e, r, n) {
-  return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
+import { j as a, e as o } from "../../index-D0RBWjq8.js";
+function s(e, n, r) {
+  return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
 }
-const s = {
+const d = {
   kernelName: "MatMulGelu",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
-  gradFunc: (e, r) => {
-    const [n, t] = r, [u, a] = i(e, n, t);
+  gradFunc: (e, n) => {
+    const [r, t] = n, [u, l] = s(e, r, t);
     return {
       x: () => u,
-      kernel: () => a
+      kernel: () => l
     };
   }
 };
-o(s);
+a(d);
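
Every module under ops/grads/ in this release follows the same custom-gradient pattern: a backward kernel invoked through the engine, plus a gradient config registered for the forward kernel. A minimal de-minified sketch, assuming the renamed `j` import resolves to tfjs-core's registerGradient and `e` to its engine() accessor (identifier names here are illustrative, not the package's real ones):

    import { engine, registerGradient, GradConfig, Tensor } from "@tensorflow/tfjs-core";

    // The backward pass delegates to a companion kernel ("MatMulGeluGrad").
    function matMulGeluGrad(dy: Tensor, x: Tensor, kernel: Tensor): Tensor[] {
      return engine().runKernel("MatMulGeluGrad", { dy, x, kernel }) as Tensor[];
    }

    const matMulGeluGradConfig: GradConfig = {
      kernelName: "MatMulGelu",
      inputsToSave: ["x", "kernel"],
      outputsToSave: [],
      gradFunc: (dy, saved) => {
        const [x, kernel] = saved;
        const [dx, dKernel] = matMulGeluGrad(dy as Tensor, x, kernel);
        return { x: () => dx, kernel: () => dKernel };
      },
    };

    registerGradient(matMulGeluGradConfig);
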
package/dist/ops/grads/normRMS.js CHANGED
@@ -1,21 +1,20 @@
-import { e as t } from "../../index-D6Q1lPZO.js";
-import { a as g } from "../../tensor_util-DfwaWayG.js";
-function i(r, a, m) {
-  return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
+import { j as t, e as g } from "../../index-D0RBWjq8.js";
+function s(r, a, n) {
+  return g().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
 }
-const s = {
+const u = {
   kernelName: "RMSNorm",
   inputsToSave: ["x", "gamma"],
   outputsToSave: [],
   gradFunc: (r, a) => {
-    const [m, n] = a, [o, e] = i(r, m, n);
+    const [n, e] = a, [m, o] = s(r, n, e);
     return {
-      x: () => o,
-      gamma: () => e
+      x: () => m,
+      gamma: () => o
     };
   }
 };
-g(s);
+t(u);
 export {
-  s as normRMSGradConfig
+  u as normRMSGradConfig
 };
package/dist/ops/grads/pack16.js CHANGED
@@ -1,7 +1,6 @@
-import "../../index-D6Q1lPZO.js";
-import { b as t } from "../../pack16-CmVZs6af.js";
-import "../../slice-DvovR5wq.js";
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { b as i } from "../../pack16-k4jq6aMX.js";
+import "../../slice-Aqy7KbJh.js";
 export {
-  t as packGradConfig
+  i as packGradConfig
 };
package/dist/ops/grads/qkv.js CHANGED
@@ -1,36 +1,34 @@
-import "../../index-D6Q1lPZO.js";
-import { a as u } from "../../matMul16-fEAJ4smh.js";
-import { concat16 as f } from "../concat16.js";
-import { sum16 as g } from "../sum16.js";
-import { packTensor as k, isPackedTensor as l } from "../../utilities/packed.js";
-import { a as h } from "../../tensor_util-DfwaWayG.js";
-import { s as G } from "../../squeeze-C00Ipm_7.js";
-const m = {
+import { j as u } from "../../index-D0RBWjq8.js";
+import { a as f } from "../../matMul16-cDxwemKj.js";
+import { concat16 as g } from "../concat16.js";
+import { sum16 as l } from "../sum16.js";
+import { s as k } from "../../squeeze--YMgaAAf.js";
+const i = {
   kernelName: "QKV",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
   gradFunc: (e, s) => {
-    const [o, n, t] = e, [a] = s, p = f([o, n, t], 1);
-    o.dispose(), n.dispose(), t.dispose();
-    const c = [a.shape[0], a.shape[1], 3 * a.shape[2]], i = u.gradFunc(p, s, {
+    const [r, n, t] = e, [a] = s, p = g([r, n, t], 1);
+    r.dispose(), n.dispose(), t.dispose();
+    const m = [a.shape[0], a.shape[1], 3 * a.shape[2]], d = f.gradFunc(p, s, {
       transposeA: !1,
       transposeB: !1,
-      originalShape: c,
+      originalShape: m,
       perm: [0, 2, 1, 3]
     });
     return p.dispose(), {
-      x: () => i.A(),
+      x: () => d.A(),
       kernel: () => {
-        const r = i.B(), d = r.shape[0] === 1 ? G(r, [0]) : g(r, 0);
-        return r.dispose(), l(r) ? k(d) : d;
+        const o = d.B(), c = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
+        return o.dispose(), c;
       }
     };
   }
 };
-function A(e, s, o) {
-  return m.gradFunc(e, [s, o], {});
+function B(e, s, r) {
+  return i.gradFunc(e, [s, r], {});
 }
-h(m);
+u(i);
 export {
-  A as qkvGrad
+  B as qkvGrad
 };
package/dist/ops/grads/rope.js CHANGED
@@ -1,7 +1,5 @@
-import "../../utilities/packed.js";
-import "../../index-D6Q1lPZO.js";
-import { a as t } from "../../rope-s4W2XO9B.js";
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { a as p } from "../../rope-BmZmp9uP.js";
 export {
-  t as ropeGradConfig
+  p as ropeGradConfig
 };
package/dist/ops/grads/softmax16.js CHANGED
@@ -1,8 +1,7 @@
-import { e as n } from "../../index-D6Q1lPZO.js";
+import { j as n, e as a } from "../../index-D0RBWjq8.js";
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { a } from "../../tensor_util-DfwaWayG.js";
 function s(r, e) {
-  return n().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
+  return a().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
 }
 const i = {
   kernelName: "Softmax16",
@@ -20,7 +19,7 @@ const i = {
     };
   }
 };
-a(i);
+n(i);
 export {
   i as softmax16GradConfig
 };
package/dist/ops/grads/unpack16.js CHANGED
@@ -1,6 +1,5 @@
-import "../../index-D6Q1lPZO.js";
-import { a as i } from "../../pack16-CmVZs6af.js";
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { a as p } from "../../pack16-k4jq6aMX.js";
 export {
-  i as unpackGradConfig
+  p as unpackGradConfig
 };
package/dist/ops/grads/utils.d.ts CHANGED
@@ -1,3 +1,4 @@
 import { TensorInfo } from '@tensorflow/tfjs-core';
 export declare function forceFloat<T extends TensorInfo>(x: T): T;
 export declare function forceInt<T extends TensorInfo>(x: T): T;
+export declare function forcePacked<T extends TensorInfo>(x: T): T;
package/dist/ops/grads/utils.js CHANGED
@@ -1,10 +1,14 @@
-function n(t) {
+function e(t) {
   return t.dtype = "float32", t;
 }
-function e(t) {
+function n(t) {
   return t.dtype = "int32", t;
 }
+function r(t) {
+  return t.dtype = "packedF16", t;
+}
 export {
-  n as forceFloat,
-  e as forceInt
+  e as forceFloat,
+  n as forceInt,
+  r as forcePacked
 };
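
The new forcePacked completes the forceFloat/forceInt trio: each helper just overwrites a tensor's dtype tag in place so packed-f16 data keeps (or regains) its marker across ops. A readable equivalent of the minified module, using the signatures declared in utils.d.ts above ("packedF16" is this package's custom dtype tag, not a standard tfjs dtype, hence the cast):

    import { TensorInfo } from "@tensorflow/tfjs-core";

    // Each helper mutates the dtype tag and returns the same object.
    export function forceFloat<T extends TensorInfo>(x: T): T {
      (x as { dtype: string }).dtype = "float32";
      return x;
    }

    export function forceInt<T extends TensorInfo>(x: T): T {
      (x as { dtype: string }).dtype = "int32";
      return x;
    }

    // New in 0.11.0: tags a tensor as packed f16.
    export function forcePacked<T extends TensorInfo>(x: T): T {
      (x as { dtype: string }).dtype = "packedF16";
      return x;
    }
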
package/dist/ops/matMul16.js CHANGED
@@ -1,9 +1,9 @@
-import "../index-D6Q1lPZO.js";
-import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-fEAJ4smh.js";
+import "../index-D0RBWjq8.js";
+import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-cDxwemKj.js";
 import "./webgl/matMul16.js";
 import "./cpu/matMul16.js";
 import "../utilities/packed.js";
-import "../pack16-CmVZs6af.js";
+import "../pack16-k4jq6aMX.js";
 export {
   p as matMul16,
   u as matMul16Gelu,
package/dist/ops/matMulGelu.js CHANGED
@@ -1,6 +1,6 @@
-import { e as u } from "../index-D6Q1lPZO.js";
+import { e as u } from "../index-D0RBWjq8.js";
 import "./cpu/matMulGelu.js";
-import "./webgl/matMulGelu.js";
+import "../matMulGelu-B2s_80-H.js";
 import "./grads/matMulGelu.js";
 function M(r, e) {
   return u().runKernel("MatMulGelu", { x: r, kernel: e });
package/dist/ops/matMulMul.js CHANGED
@@ -1,4 +1,4 @@
-import { e as u } from "../index-D6Q1lPZO.js";
+import { e as u } from "../index-D0RBWjq8.js";
 import "./cpu/matMulMul.js";
 import "./webgl/matMulMul.js";
 function m(e, r, t, l = !1, n = !1) {
package/dist/ops/mul16.js CHANGED
@@ -1,4 +1,4 @@
-import { m as t, e as u } from "../index-D6Q1lPZO.js";
+import { m as t, e as u } from "../index-D0RBWjq8.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function i(r, e) {
   return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
package/dist/ops/mulDrop.js CHANGED
@@ -1,4 +1,4 @@
-import { e as t } from "../index-D6Q1lPZO.js";
+import { e as t } from "../index-D0RBWjq8.js";
 import "./cpu/mulDropout.js";
 import "./webgl/mulDropout.js";
 function m(r, o, e, n) {
package/dist/ops/normRMS.js CHANGED
@@ -1,4 +1,4 @@
-import { e as n } from "../index-D6Q1lPZO.js";
+import { e as n } from "../index-D0RBWjq8.js";
 import "./cpu/normRMS.js";
 import "./webgl/normRMS.js";
 import "./grads/normRMS.js";
package/dist/ops/pack16.js CHANGED
@@ -1,6 +1,5 @@
-import "../utilities/packed.js";
-import "../index-D6Q1lPZO.js";
-import { p as t } from "../pack16-CmVZs6af.js";
+import "../index-D0RBWjq8.js";
+import { p as a } from "../pack16-k4jq6aMX.js";
 export {
-  t as pack16
+  a as pack16
 };
package/dist/ops/qkv.js CHANGED
@@ -1,14 +1,10 @@
-import { e as m } from "../index-D6Q1lPZO.js";
+import { e as t } from "../index-D0RBWjq8.js";
 import "./cpu/qkv.js";
 import "./webgl/qkv.js";
 import "./grads/qkv.js";
-import { packTensor as f } from "../utilities/packed.js";
-function l(n, t, e, r = !1) {
-  const o = m().runKernel("QKV", { x: n, kernel: t }, { heads: e, packed: r });
-  return r && o.forEach((i) => {
-    f(i);
-  }), o;
+function u(r, e, n, o = !1) {
+  return t().runKernel("QKV", { x: r, kernel: e }, { heads: n, packed: o });
 }
 export {
-  l as qkv
+  u as qkv
 };
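
After this change qkv is a thin wrapper over the QKV kernel: the packed flag is forwarded as a kernel attribute, and the per-output packTensor re-tagging is gone, so the kernel outputs are expected to carry the right dtype tag already. A hypothetical call site, with import path and shapes shown purely for illustration:

    import { Tensor } from "@tensorflow/tfjs-core";
    import { qkv } from "@genai-fi/nanogpt/dist/ops/qkv.js"; // illustrative path

    declare const x: Tensor;      // e.g. [batch, seq, embd]
    declare const kernel: Tensor; // fused QKV projection weights
    const heads = 8;              // illustrative head count

    // Returns the query/key/value projections from one kernel launch.
    const [q, k, v] = qkv(x, kernel, heads, /* packed */ true) as Tensor[];
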
package/dist/ops/reshape16.js CHANGED
@@ -1,43 +1,41 @@
-import { e as c } from "../index-D6Q1lPZO.js";
-import { isPackedTensor as u, packTensor as i } from "../utilities/packed.js";
-import { r as p } from "../reshape-CaPQzFvz.js";
-import { a as l, r as t } from "../tensor_util-DfwaWayG.js";
-const m = {
+import { j as p, h as s, e as u } from "../index-D0RBWjq8.js";
+import { r as c } from "../reshape-CkjKPPqB.js";
+const i = {
   kernelName: "Reshape16",
   inputsToSave: ["x"],
   gradFunc: (e, r) => {
     const [n] = r;
     if (Array.isArray(e))
       throw new Error("Reshape16 gradient does not support multiple outputs.");
-    return { x: () => f(e, n.shape) };
+    return { x: () => m(e, n.shape) };
   }
 };
-l(m);
+p(i);
 function a(e) {
-  const { inputs: r, attrs: n } = e, { x: s } = r, { shape: o } = n;
-  return u(s) ? i(p(s, o)) : p(s, o);
+  const { inputs: r, attrs: n } = e, { x: t } = r, { shape: o } = n;
+  return c(t, o);
 }
-const k = {
+const l = {
   kernelName: "Reshape16",
   backendName: "webgpu",
   kernelFunc: a
 };
-t(k);
-const g = {
+s(l);
+const h = {
   kernelName: "Reshape16",
   backendName: "webgl",
   kernelFunc: a
 };
-t(g);
-const h = {
+s(h);
+const g = {
   kernelName: "Reshape16",
   backendName: "cpu",
   kernelFunc: a
 };
-t(h);
-function f(e, r) {
-  return c().runKernel("Reshape16", { x: e }, { shape: r });
+s(g);
+function m(e, r) {
+  return u().runKernel("Reshape16", { x: e }, { shape: r });
 }
 export {
-  f as reshape16
+  m as reshape16
 };
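
reshape16 also shows the release's recurring multi-backend registration: one backend-agnostic kernelFunc registered under webgpu, webgl, and cpu, with the minified `h`/`s` import resolving to a kernel-registration call. A sketch assuming that call is tfjs-core's registerKernel, with an illustrative kernel body:

    import { registerKernel, reshape, KernelFunc, Tensor } from "@tensorflow/tfjs-core";

    // Shared across backends; the real module routes through its bundled
    // reshape chunk so the packed dtype tag survives the reshape.
    const reshape16KernelFunc: KernelFunc = ({ inputs, attrs }) => {
      const { x } = inputs as { x: Tensor };
      const { shape } = attrs as unknown as { shape: number[] };
      return reshape(x, shape);
    };

    for (const backendName of ["webgpu", "webgl", "cpu"]) {
      registerKernel({ kernelName: "Reshape16", backendName, kernelFunc: reshape16KernelFunc });
    }
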
package/dist/ops/rope.d.ts CHANGED
@@ -1,3 +1,3 @@
 import { default as RoPECache } from '../layers/RoPECache';
-import { Tensor } from '@tensorflow/tfjs';
+import { Tensor } from '@tensorflow/tfjs-core';
 export declare function rope(x: Tensor, cache: RoPECache, pastLength: number, negSin?: boolean): Tensor;
package/dist/ops/rope.js CHANGED
@@ -1,12 +1,7 @@
-import "../index-D6Q1lPZO.js";
-import "../random_width-BVV9HveY.js";
-import "../register_all_kernels-nvj2k7OC.js";
-import "../index-Cp39cXWe.js";
-import "../dataset-D2P7rHAw.js";
+import "../index-D0RBWjq8.js";
 import "./cpu/rope.js";
 import "./webgl/rope.js";
-import { r as x } from "../rope-s4W2XO9B.js";
-import "../utilities/packed.js";
+import { r as i } from "../rope-BmZmp9uP.js";
 export {
-  x as rope
+  i as rope
 };
package/dist/ops/scatterSub.js CHANGED
@@ -1,4 +1,4 @@
-import { e as i } from "../index-D6Q1lPZO.js";
+import { e as i } from "../index-D0RBWjq8.js";
 import "./cpu/scatterSub.js";
 import "./webgl/scatterSub.js";
 function c(t, r, e) {
package/dist/ops/slice16.js CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as n } from "../utilities/packed.js";
-import { e as c } from "../index-D6Q1lPZO.js";
-import { s as i } from "../slice-DvovR5wq.js";
+import { e as c } from "../index-D0RBWjq8.js";
+import { s as i } from "../slice-Aqy7KbJh.js";
 function a(r, e, o) {
   return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
 }
package/dist/ops/softmax16.js CHANGED
@@ -1,12 +1,9 @@
-import { e } from "../index-D6Q1lPZO.js";
+import { e as n } from "../index-D0RBWjq8.js";
 import "./grads/softmax16.js";
-import { isPackedTensor as m, packTensor as a } from "../utilities/packed.js";
-function p(r) {
-  if (!m(r))
-    return e().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
-  const n = e().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 });
-  return m(r) ? a(n) : n;
+import { isPackedTensor as e } from "../utilities/packed.js";
+function t(r) {
+  return e(r) ? n().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 }) : n().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
 }
 export {
-  p as softmax16
+  t as softmax16
 };
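
The rewrite collapses softmax16's earlier two-step branch (run Softmax16, then redundantly re-tag via packTensor) into a single dispatch: packed-f16 inputs go to the custom Softmax16 kernel, everything else to the stock Softmax kernel. A de-minified reading, assuming the `e` import is tfjs-core's engine() accessor (identifiers and import path illustrative):

    import { engine, Tensor } from "@tensorflow/tfjs-core";
    import { isPackedTensor } from "@genai-fi/nanogpt/dist/utilities/packed.js"; // illustrative path

    export function softmax16(logits: Tensor): Tensor {
      // Both paths reduce over the last axis; only the kernel differs.
      const kernelName = isPackedTensor(logits) ? "Softmax16" : "Softmax";
      return engine().runKernel(kernelName, { logits }, { dim: logits.rank - 1 }) as Tensor;
    }
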
package/dist/ops/sub16.js CHANGED
@@ -1,4 +1,4 @@
-import { c as s, e as t } from "../index-D6Q1lPZO.js";
+import { c as s, e as t } from "../index-D0RBWjq8.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function c(r, e) {
   return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
package/dist/ops/sum16.js CHANGED
@@ -1,6 +1,6 @@
-import { e as t } from "../index-D6Q1lPZO.js";
+import { e as t } from "../index-D0RBWjq8.js";
 import { isPackedTensor as s } from "../utilities/packed.js";
-import { s as n } from "../sum-ywRJj3Zr.js";
+import { s as n } from "../sum-BdplSvq_.js";
 function p(r, o, e = !1) {
   if (!s(r))
     return n(r, o, e);
package/dist/ops/transpose16.js CHANGED
@@ -1,41 +1,40 @@
-import { e as i } from "../index-D6Q1lPZO.js";
-import { forceInt as u, forceFloat as l } from "./grads/utils.js";
-import { g as m } from "../axis_util-BsIr9ZNu.js";
-import { isPackedTensor as f, packTensor as g } from "../utilities/packed.js";
-import { t as a } from "../transpose-DH4gmHvu.js";
-import { a as d, r as p } from "../tensor_util-DfwaWayG.js";
-const k = {
+import { j as i, h as p, e as u } from "../index-D0RBWjq8.js";
+import { forcePacked as l, forceFloat as m } from "./grads/utils.js";
+import { g } from "../axis_util-DofAuy0p.js";
+import { isPackedTensor as f } from "../utilities/packed.js";
+import { t as a } from "../transpose-JawVKyZy.js";
+const d = {
   kernelName: "Transpose16",
-  gradFunc: (e, s, o) => {
-    if (Array.isArray(e))
+  gradFunc: (r, s, t) => {
+    if (Array.isArray(r))
       throw new Error("Transpose16 gradient does not support multiple outputs.");
-    const n = o, { perm: r } = n, t = m(r);
-    return { x: () => w(e, t) };
+    const n = t, { perm: e } = n, o = g(e);
+    return { x: () => T(r, o) };
   }
 };
-d(k);
-function c(e) {
-  const { inputs: s, attrs: o } = e, { x: n } = s, { perm: r } = o, t = f(n);
-  if (t && r[r.length - 1] !== n.shape.length - 1)
+i(d);
+function c(r) {
+  const { inputs: s, attrs: t } = r, { x: n } = s, { perm: e } = t, o = f(n);
+  if (o && e[e.length - 1] !== n.shape.length - 1)
     throw new Error("Transpose16 currently only supports the last axis being unchanged.");
-  return t ? g(u(a(l(n), r))) : a(n, r);
+  return o ? l(a(m(n), e)) : a(n, e);
 }
-const h = {
+const k = {
   kernelName: "Transpose16",
   backendName: "webgl",
   kernelFunc: c
 };
-p(h);
-const T = {
+p(k);
+const h = {
   kernelName: "Transpose16",
   backendName: "cpu",
   kernelFunc: c
 };
-p(T);
-function w(e, s) {
-  return s == null && (s = e.shape.map((n, r) => r).reverse()), i().runKernel("Transpose16", { x: e }, { perm: s });
+p(h);
+function T(r, s) {
+  return s == null && (s = r.shape.map((n, e) => e).reverse()), u().runKernel("Transpose16", { x: r }, { perm: s });
 }
 export {
-  w as transpose16,
-  k as transpose16GradConfig
+  T as transpose16,
+  d as transpose16GradConfig
 };
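
Note the invariant transpose16 enforces for packed inputs: the permutation must leave the last axis in place, presumably because packed f16 pairs values along that axis; the gradient reuses the same kernel with the inverse permutation. An illustrative (hypothetical) call:

    import { Tensor } from "@tensorflow/tfjs-core";
    import { transpose16 } from "@genai-fi/nanogpt/dist/ops/transpose16.js"; // illustrative path

    declare const x: Tensor; // e.g. packed-f16, shape [batch, seq, heads, headDim]

    const swapped = transpose16(x, [0, 2, 1, 3]); // last axis unchanged: OK
    // transpose16(x, [0, 1, 3, 2]) on a packed tensor would throw:
    // "Transpose16 currently only supports the last axis being unchanged."
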
package/dist/ops/unpack16.js CHANGED
@@ -1,5 +1,5 @@
-import "../index-D6Q1lPZO.js";
-import { u as t } from "../pack16-CmVZs6af.js";
+import "../index-D0RBWjq8.js";
+import { u as t } from "../pack16-k4jq6aMX.js";
 import "../utilities/packed.js";
 export {
   t as unpack16
package/dist/ops/webgl/adamAdjust.js CHANGED
@@ -1,6 +1,5 @@
-import { r as n } from "../../Reshape-CHdUjC72.js";
-import "../../index-D6Q1lPZO.js";
-import { r as f } from "../../tensor_util-DfwaWayG.js";
+import { r as n } from "../../Reshape-Ct266DEk.js";
+import { h as f } from "../../index-D0RBWjq8.js";
 class v {
   variableNames = ["moments", "value"];
   outputShape;
package/dist/ops/webgl/adamMoments.js CHANGED
@@ -1,5 +1,4 @@
-import "../../index-D6Q1lPZO.js";
-import { r as m } from "../../tensor_util-DfwaWayG.js";
+import { h as m } from "../../index-D0RBWjq8.js";
 class i {
   variableNames = ["moments", "gradient"];
   outputShape;
package/dist/ops/webgl/appendCache.js CHANGED
@@ -1,5 +1,4 @@
-import "../../index-D6Q1lPZO.js";
-import { r as p } from "../../tensor_util-DfwaWayG.js";
+import { h as p } from "../../index-D0RBWjq8.js";
 class m {
   variableNames = ["cache", "item"];
   outputShape;
package/dist/ops/webgl/attentionMask.js CHANGED
@@ -1,6 +1,5 @@
-import "../../index-D6Q1lPZO.js";
-import { r as d } from "../../tensor_util-DfwaWayG.js";
-class h {
+import { h } from "../../index-D0RBWjq8.js";
+class m {
   variableNames = ["q", "k"];
   outputShape;
   userCode;
@@ -35,12 +34,12 @@ class h {
   }
 }
 function l(o) {
-  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], m = new h(i, u, r, c, p);
-  return a.runWebGLProgram(m, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
+  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new m(i, u, r, c, p);
+  return a.runWebGLProgram(d, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
 }
 const f = {
   kernelName: "AttentionMask",
   backendName: "webgl",
   kernelFunc: l
 };
-d(f);
+h(f);
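
These webgl/* modules share one pattern: a hand-written GPGPU program object (variableNames / outputShape / userCode) executed via the webgl backend's runWebGLProgram, with scalar parameters passed as custom uniform values (the [[s], [n], ...] arrays above). A rough, hypothetical sketch of that pattern, not this package's actual shader; the program fields follow tfjs-backend-webgl's program interface, which is an assumption here:

    import { registerKernel, KernelFunc, TensorInfo } from "@tensorflow/tfjs-core";
    import { MathBackendWebGL } from "@tensorflow/tfjs-backend-webgl";

    // Minimal program: elementwise product of q and k, scaled by a uniform.
    function makeScaledMulProgram(outputShape: number[]) {
      return {
        variableNames: ["q", "k"],
        outputShape,
        customUniforms: [{ name: "divisor", type: "float" as const }],
        userCode: `
          void main() {
            setOutput(getQAtOutCoords() * getKAtOutCoords() * divisor);
          }
        `,
      };
    }

    const kernelFunc: KernelFunc = ({ inputs, backend, attrs }) => {
      const { q, k } = inputs as { q: TensorInfo; k: TensorInfo };
      const { divisor } = attrs as unknown as { divisor: number };
      const webgl = backend as MathBackendWebGL;
      // customUniformValues mirrors the nested arrays used above.
      return webgl.runWebGLProgram(makeScaledMulProgram([...q.shape]), [q, k], "float32", [[divisor]]);
    };

    registerKernel({ kernelName: "ScaledQKExample", backendName: "webgl", kernelFunc });
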
package/dist/ops/webgl/fusedSoftmax.js CHANGED
@@ -1,9 +1,7 @@
-import { m as b, s as I, r as k } from "../../RealDiv-zz7FpkKX.js";
-import { r as v } from "../../Reshape-CHdUjC72.js";
-import "../../index-D6Q1lPZO.js";
-import { r as w } from "../../tensor_util-DfwaWayG.js";
-import { p as P } from "../../tensor-CzmOBsdf.js";
-import { e as S } from "../../axis_util-BsIr9ZNu.js";
+import { m as b, s as I, r as k } from "../../RealDiv-Ds-jvL09.js";
+import { r as v } from "../../Reshape-Ct266DEk.js";
+import { h as w, af as P } from "../../index-D0RBWjq8.js";
+import { e as S } from "../../axis_util-DofAuy0p.js";
 class T {
   variableNames = ["logits", "maxLogits"];
   outputShape;
@@ -62,11 +60,11 @@ function L(r) {
   o.disposeIntermediateTensorInfo(d);
   const p = I({ inputs: { x: s }, backend: o, attrs: { axis: i, keepDims: !1 } }), a = v({ inputs: { x: p }, backend: o, attrs: { shape: f } });
   if (n !== void 0 && n > 0) {
-    const g = new C(e.shape), h = o.runWebGLProgram(g, [s, a], "float32", [
+    const h = new C(e.shape), g = o.runWebGLProgram(h, [s, a], "float32", [
       [n],
      [c ?? Math.random() * 1e4]
    ]);
-    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), h;
+    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), g;
   }
   const x = k({ inputs: { a: s, b: a }, backend: o });
   return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), x;
package/dist/ops/webgl/gatherSub.js CHANGED
@@ -1,6 +1,5 @@
-import "../../index-D6Q1lPZO.js";
-import { r as i } from "../../tensor_util-DfwaWayG.js";
-class l {
+import { h as l } from "../../index-D0RBWjq8.js";
+class u {
   variableNames = ["labels", "logits", "values"];
   outputShape;
   userCode;
@@ -16,13 +15,13 @@ class l {
   `;
   }
 }
-function u(t) {
-  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new l(r);
+function i(t) {
+  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new u(r);
   return a.runWebGLProgram(n, [o, e, s], "float32");
 }
 const c = {
   kernelName: "EfficientGatherSub",
   backendName: "webgl",
-  kernelFunc: u
+  kernelFunc: i
 };
-i(c);
+l(c);
package/dist/ops/webgl/gelu.js CHANGED
@@ -1,6 +1,5 @@
-import "../../index-D6Q1lPZO.js";
-import { u as s, C as i } from "../../kernel_funcs_utils-CWfOAPGO.js";
-import { r as a } from "../../tensor_util-DfwaWayG.js";
+import { h as a } from "../../index-D0RBWjq8.js";
+import { u as s, C as i } from "../../kernel_funcs_utils-CSaumNDs.js";
 const t = 0.7978845608028654, r = 0.044715, c = i + `
 float x3 = x * x * x;
 float inner = x + ${r} * x3;