@genai-fi/nanogpt 0.10.3 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. package/dist/Generator.d.ts +10 -5
  2. package/dist/Generator.js +1789 -1765
  3. package/dist/{RealDiv-KAPDe8zB.js → RealDiv-Ds-jvL09.js} +22 -22
  4. package/dist/{Reshape-BYkmUnAv.js → Reshape-Cd6e-Otn.js} +1 -1
  5. package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ct266DEk.js} +9 -9
  6. package/dist/TeachableLLM.d.ts +4 -3
  7. package/dist/TeachableLLM.js +14 -14
  8. package/dist/Trainer.d.ts +2 -2
  9. package/dist/Trainer.js +6 -6
  10. package/dist/{axis_util-BaG7mf5A.js → axis_util-DofAuy0p.js} +3 -3
  11. package/dist/backend.js +2 -2
  12. package/dist/{backend_util-RCe-rHaj.js → backend_util-C7NWHpv7.js} +7 -7
  13. package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-B0Vls736.js} +10 -10
  14. package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-DDaNMbX7.js} +2 -2
  15. package/dist/checks/appendCache.js +2 -2
  16. package/dist/checks/attentionMask.js +3 -3
  17. package/dist/checks/gelu.js +2 -2
  18. package/dist/checks/matMulGelu.js +2 -2
  19. package/dist/checks/normRMS.js +4 -4
  20. package/dist/checks/normRMSGrad.js +3 -3
  21. package/dist/checks/packUnpack.js +2 -2
  22. package/dist/checks/qkv.js +4 -4
  23. package/dist/checks/rope.js +2 -2
  24. package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-Dn5tzexi.js} +4 -4
  25. package/dist/complex-DClmWqJt.js +11 -0
  26. package/dist/{concat-BV8bt5H-.js → concat-C6X3AAlQ.js} +1 -1
  27. package/dist/{concat_util-DpW8mL_l.js → concat_util-CHsJFZJJ.js} +1 -1
  28. package/dist/{dataset-BcwmTGYc.js → dataset-DcjWqUVQ.js} +7 -7
  29. package/dist/{dropout-BcvN9JYi.js → dropout-OxuaJz6z.js} +11 -11
  30. package/dist/{expand_dims-DT4tEPwA.js → expand_dims-BzfJK2uc.js} +3 -3
  31. package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-eS9QJ6ut.js} +1 -1
  32. package/dist/{floor-D5QdR_le.js → floor-DIb-lN_u.js} +1 -1
  33. package/dist/gather-BcO5UQNJ.js +9 -0
  34. package/dist/{gelu-CjNPL4OH.js → gelu-DqTbCx5x.js} +1 -1
  35. package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-CJcbnKPC.js} +2 -2
  36. package/dist/{index-DOvlwCh-.js → index-D0RBWjq8.js} +52 -52
  37. package/dist/{index-BwexR4lA.js → index-Dj5TkmPY.js} +89 -89
  38. package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-CSaumNDs.js} +11 -11
  39. package/dist/layers/BaseLayer.js +2 -2
  40. package/dist/layers/CausalSelfAttention.js +6 -6
  41. package/dist/layers/MLP.js +4 -4
  42. package/dist/layers/PositionEmbedding.js +5 -5
  43. package/dist/layers/RMSNorm.js +3 -3
  44. package/dist/layers/RoPECache.js +4 -4
  45. package/dist/layers/TiedEmbedding.js +6 -6
  46. package/dist/layers/TransformerBlock.js +1 -1
  47. package/dist/loader/loadTransformers.js +1 -1
  48. package/dist/loader/oldZipLoad.js +17 -17
  49. package/dist/log_sum_exp-VLZgbFAH.js +39 -0
  50. package/dist/main.d.ts +1 -1
  51. package/dist/main.js +9 -9
  52. package/dist/{matMul16-BWRSOCWB.js → matMul16-cDxwemKj.js} +7 -7
  53. package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-B2s_80-H.js} +18 -18
  54. package/dist/{mat_mul-SjpJRLyL.js → mat_mul-DxpNTCRz.js} +3 -3
  55. package/dist/{mod-AnXEvvpo.js → mod-PrOKlFxH.js} +1 -1
  56. package/dist/models/NanoGPTV1.js +2 -2
  57. package/dist/models/model.js +9 -9
  58. package/dist/{ones-D2rT0xk2.js → ones-BX_wEgzB.js} +3 -3
  59. package/dist/ops/adamAdjust.js +1 -1
  60. package/dist/ops/adamMoments.js +1 -1
  61. package/dist/ops/add16.js +1 -1
  62. package/dist/ops/appendCache.js +3 -3
  63. package/dist/ops/attentionMask.js +1 -1
  64. package/dist/ops/concat16.js +2 -2
  65. package/dist/ops/cpu/adamAdjust.js +6 -6
  66. package/dist/ops/cpu/adamMoments.js +2 -2
  67. package/dist/ops/cpu/appendCache.js +5 -5
  68. package/dist/ops/cpu/attentionMask.js +10 -10
  69. package/dist/ops/cpu/fusedSoftmax.js +2 -2
  70. package/dist/ops/cpu/gatherSub.js +6 -6
  71. package/dist/ops/cpu/gelu.js +9 -9
  72. package/dist/ops/cpu/matMul16.js +2 -2
  73. package/dist/ops/cpu/matMulGelu.js +3 -3
  74. package/dist/ops/cpu/matMulMul.js +1 -1
  75. package/dist/ops/cpu/mulDropout.js +1 -1
  76. package/dist/ops/cpu/normRMS.js +3 -3
  77. package/dist/ops/cpu/qkv.js +3 -3
  78. package/dist/ops/cpu/rope.js +9 -9
  79. package/dist/ops/cpu/scatterSub.js +11 -11
  80. package/dist/ops/dot16.js +2 -2
  81. package/dist/ops/gatherSub.js +1 -1
  82. package/dist/ops/gelu.js +2 -2
  83. package/dist/ops/grads/add16.js +4 -4
  84. package/dist/ops/grads/attentionMask.js +2 -2
  85. package/dist/ops/grads/gelu.js +2 -2
  86. package/dist/ops/grads/matMul16.js +3 -3
  87. package/dist/ops/grads/matMulGelu.js +3 -3
  88. package/dist/ops/grads/normRMS.js +7 -7
  89. package/dist/ops/grads/pack16.js +3 -3
  90. package/dist/ops/grads/qkv.js +6 -6
  91. package/dist/ops/grads/rope.js +2 -2
  92. package/dist/ops/grads/softmax16.js +1 -1
  93. package/dist/ops/grads/unpack16.js +2 -2
  94. package/dist/ops/matMul16.js +3 -3
  95. package/dist/ops/matMulGelu.js +2 -2
  96. package/dist/ops/matMulMul.js +1 -1
  97. package/dist/ops/mul16.js +1 -1
  98. package/dist/ops/mulDrop.js +1 -1
  99. package/dist/ops/normRMS.js +1 -1
  100. package/dist/ops/pack16.js +2 -2
  101. package/dist/ops/qkv.js +1 -1
  102. package/dist/ops/reshape16.js +6 -6
  103. package/dist/ops/rope.js +2 -2
  104. package/dist/ops/scatterSub.js +1 -1
  105. package/dist/ops/slice16.js +2 -2
  106. package/dist/ops/softmax16.js +1 -1
  107. package/dist/ops/sub16.js +1 -1
  108. package/dist/ops/sum16.js +2 -2
  109. package/dist/ops/transpose16.js +3 -3
  110. package/dist/ops/unpack16.js +2 -2
  111. package/dist/ops/webgl/adamAdjust.js +2 -2
  112. package/dist/ops/webgl/adamMoments.js +1 -1
  113. package/dist/ops/webgl/appendCache.js +1 -1
  114. package/dist/ops/webgl/attentionMask.js +4 -4
  115. package/dist/ops/webgl/fusedSoftmax.js +6 -6
  116. package/dist/ops/webgl/gatherSub.js +1 -1
  117. package/dist/ops/webgl/gelu.js +2 -2
  118. package/dist/ops/webgl/log.js +3 -3
  119. package/dist/ops/webgl/matMul16.js +11 -11
  120. package/dist/ops/webgl/matMulGelu.js +4 -4
  121. package/dist/ops/webgl/matMulMul.js +7 -7
  122. package/dist/ops/webgl/mulDropout.js +1 -1
  123. package/dist/ops/webgl/normRMS.js +7 -7
  124. package/dist/ops/webgl/qkv.js +1 -1
  125. package/dist/ops/webgl/rope.js +4 -4
  126. package/dist/ops/webgl/scatterSub.js +1 -1
  127. package/dist/ops/webgpu/adamAdjust.js +3 -3
  128. package/dist/ops/webgpu/adamMoments.js +3 -3
  129. package/dist/ops/webgpu/add16.js +1 -1
  130. package/dist/ops/webgpu/appendCache.js +3 -3
  131. package/dist/ops/webgpu/attentionMask.js +5 -5
  132. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  133. package/dist/ops/webgpu/concat16.js +5 -5
  134. package/dist/ops/webgpu/gatherSub.js +5 -5
  135. package/dist/ops/webgpu/gelu.js +3 -3
  136. package/dist/ops/webgpu/matMul16.js +18 -18
  137. package/dist/ops/webgpu/matMul16_program.js +2 -2
  138. package/dist/ops/webgpu/mul16.js +4 -4
  139. package/dist/ops/webgpu/normRMS.js +6 -6
  140. package/dist/ops/webgpu/normRMSGrad.js +4 -4
  141. package/dist/ops/webgpu/pack16.js +1 -1
  142. package/dist/ops/webgpu/pack16_program.js +2 -2
  143. package/dist/ops/webgpu/qkv.js +6 -6
  144. package/dist/ops/webgpu/rope.js +3 -3
  145. package/dist/ops/webgpu/scatterSub.js +3 -3
  146. package/dist/ops/webgpu/slice16.js +4 -4
  147. package/dist/ops/webgpu/softmax16.js +2 -2
  148. package/dist/ops/webgpu/softmax16_program.js +2 -2
  149. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  150. package/dist/ops/webgpu/softmax16grad.js +1 -1
  151. package/dist/ops/webgpu/sub16.js +4 -4
  152. package/dist/ops/webgpu/sum16.js +6 -6
  153. package/dist/ops/webgpu/transpose16.js +2 -2
  154. package/dist/ops/webgpu/transpose16_program.js +2 -2
  155. package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
  156. package/dist/ops/webgpu/unpack16.js +3 -3
  157. package/dist/ops/webgpu/utils/binary_op.js +3 -3
  158. package/dist/ops/webgpu/utils/reductions.js +4 -4
  159. package/dist/{ops-B5yanEdW.js → ops-FJapAPfm.js} +56 -56
  160. package/dist/{pack16-nQ6JaLo-.js → pack16-k4jq6aMX.js} +7 -7
  161. package/dist/patches/webgpu_backend.js +7 -7
  162. package/dist/patches/webgpu_base.js +1 -1
  163. package/dist/patches/webgpu_program.js +8 -8
  164. package/dist/{random_width-or-CEftb.js → random_width-UGQn4OWb.js} +33 -33
  165. package/dist/range-CuGvVN2c.js +10 -0
  166. package/dist/{relu-CP0ZcxWO.js → relu-Cf80uA2p.js} +1 -1
  167. package/dist/{reshape-ByE68wS9.js → reshape-CkjKPPqB.js} +1 -1
  168. package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-DB8k9KN_.js} +43 -43
  169. package/dist/{rope-Ir4mTyD1.js → rope-BmZmp9uP.js} +1 -1
  170. package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-BY22Cc-C.js} +1 -1
  171. package/dist/{selu_util-kbhpTdYD.js → selu_util-BuLbmbrl.js} +5 -5
  172. package/dist/{shared-DT1TkE6w.js → shared-B7USJZgw.js} +1 -1
  173. package/dist/{shared-dntlHIDQ.js → shared-BQboIImQ.js} +86 -86
  174. package/dist/{slice-BfEGSH82.js → slice-Aqy7KbJh.js} +3 -3
  175. package/dist/{slice_util-uTKwiEpW.js → slice_util-D8CQRenR.js} +7 -7
  176. package/dist/{softmax-CA5jFsLR.js → softmax-faLoUZVT.js} +1 -1
  177. package/dist/{split-CVLc0w--.js → split-BNz5jcGc.js} +3 -3
  178. package/dist/{squeeze-C7Z2srUo.js → squeeze--YMgaAAf.js} +2 -2
  179. package/dist/{stack-Cf4n9h0N.js → stack-WJK22CFn.js} +1 -1
  180. package/dist/{step-CINUs5QB.js → step-dXR33iOg.js} +32 -32
  181. package/dist/sum-BdplSvq_.js +11 -0
  182. package/dist/tensor-BQqrDvpx.js +8 -0
  183. package/dist/tensor1d-LxP9asMm.js +11 -0
  184. package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN1sSfQO.js} +3 -3
  185. package/dist/{tensor4d-BARPdTaS.js → tensor4d-DVwr7pLF.js} +1 -1
  186. package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-Vi4JfLzT.js} +28 -28
  187. package/dist/{tile-mbfagpsB.js → tile-CvN_LyVr.js} +4 -4
  188. package/dist/tokeniser/BaseTokeniser.d.ts +27 -0
  189. package/dist/tokeniser/BaseTokeniser.js +94 -0
  190. package/dist/tokeniser/CharTokeniser.d.ts +4 -3
  191. package/dist/tokeniser/CharTokeniser.js +46 -32
  192. package/dist/tokeniser/bpe.d.ts +4 -3
  193. package/dist/tokeniser/bpe.js +60 -45
  194. package/dist/tokeniser/type.d.ts +11 -0
  195. package/dist/training/Adam.js +2 -2
  196. package/dist/training/AdamExt.js +1 -1
  197. package/dist/training/DatasetBuilder.d.ts +2 -2
  198. package/dist/training/DatasetBuilder.js +32 -36
  199. package/dist/training/FullTrainer.js +1 -1
  200. package/dist/training/Trainer.d.ts +3 -3
  201. package/dist/training/Trainer.js +2 -2
  202. package/dist/training/sparseCrossEntropy.js +3 -3
  203. package/dist/{transpose-ClWiBS_b.js → transpose-JawVKyZy.js} +5 -5
  204. package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-LAbmE9G4.js} +78 -78
  205. package/dist/utilities/dummy.js +3 -3
  206. package/dist/utilities/multinomialCPU.js +2 -2
  207. package/dist/utilities/packed.js +1 -1
  208. package/dist/utilities/performance.js +1 -1
  209. package/dist/utilities/profile.js +1 -1
  210. package/dist/utilities/safetensors.js +2 -2
  211. package/dist/utilities/sentences.js +5 -5
  212. package/dist/utilities/weights.js +2 -2
  213. package/dist/{variable-WawDEaAb.js → variable-DQ9yYgEU.js} +1 -1
  214. package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-CAE4RICo.js} +3 -3
  215. package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-BdovYhXr.js} +1 -1
  216. package/dist/{zeros-KnWaWf-X.js → zeros-DeiE2zTa.js} +2 -2
  217. package/dist/{zeros_like-DvE73F4e.js → zeros_like-BAz3iKru.js} +77 -77
  218. package/package.json +1 -1
  219. package/dist/complex-DjxcVmoX.js +0 -11
  220. package/dist/gather-D3JcZUaI.js +0 -9
  221. package/dist/log_sum_exp-ngO0-4pK.js +0 -39
  222. package/dist/range-BklejeeW.js +0 -10
  223. package/dist/sum-DWAtNGez.js +0 -11
  224. package/dist/tensor-DJoc7gJU.js +0 -8
  225. package/dist/tensor1d-D11P_7Dp.js +0 -11
package/dist/ops/cpu/gatherSub.js CHANGED
@@ -1,18 +1,18 @@
- import { A as u, B as c, E as g, aj as p, p as h, c as m } from "../../index-DOvlwCh-.js";
- import { r as l } from "../../range-BklejeeW.js";
- import { s as N } from "../../stack-Cf4n9h0N.js";
+ import { q as u, u as c, E as g, aj as h, h as m, c as p } from "../../index-D0RBWjq8.js";
+ import { r as l } from "../../range-CuGvVN2c.js";
+ import { s as N } from "../../stack-WJK22CFn.js";
  function f(e, t) {
  const n = c(t, "indices", "gatherND", "int32"), s = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
- return g.runKernel(p, s);
+ return g.runKernel(h, s);
  }
  const b = /* @__PURE__ */ u({ gatherND_: f });
  function d(e) {
  const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = l(0, s, 1, "int32"), i = N([a, n], 1), o = b(r, i);
- return m(t, o);
+ return p(t, o);
  }
  const k = {
  kernelName: "EfficientGatherSub",
  backendName: "cpu",
  kernelFunc: d
  };
- h(k);
+ m(k);
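
Because the minified names make the kernel hard to read, here is a minimal deminified sketch of what the cpu EfficientGatherSub kernel above computes, assuming the public tf.js API; all identifier names are mine:

```js
import * as tf from "@tensorflow/tfjs"; // sketch uses the public API, not the bundled chunks

// Hypothetical deminified view of the kernelFunc above, not the shipped code:
// pick logits[i, labels[i]] for every row i and subtract it from `values`.
function efficientGatherSub({ values, labels, logits }) {
  const batch = labels.shape[0];
  const rows = tf.range(0, batch, 1, "int32");   // [0, 1, ..., batch-1]
  const indices = tf.stack([rows, labels], 1);   // pairs [i, labels[i]]
  const picked = tf.gatherND(logits, indices);   // logits[i, labels[i]]
  return tf.sub(values, picked);
}
```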
package/dist/ops/cpu/gelu.js CHANGED
@@ -1,4 +1,4 @@
- import { p as t, t as d } from "../../index-DOvlwCh-.js";
+ import { h as t, t as d } from "../../index-D0RBWjq8.js";
  const o = 0.7978845608028654, c = 0.044715;
  function m(r) {
  const { inputs: u } = r, { x: n } = u, e = n;
@@ -7,12 +7,12 @@ function m(r) {
  return e.mul(s);
  });
  }
- const N = {
+ const p = {
  kernelName: "Gelu",
  backendName: "cpu",
  kernelFunc: m
  };
- t(N);
+ t(p);
  const K = {
  kernelName: "Gelu",
  backendName: "tensorflow",
@@ -22,19 +22,19 @@ t(K);
  function i(r) {
  const { dy: u, x: n } = r.inputs;
  return d(() => {
- const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
- return u.mul(p);
+ const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), N = g.add(G);
+ return u.mul(N);
  });
  }
- const x = {
+ const h = {
  kernelName: "GeluGrad",
  backendName: "cpu",
  kernelFunc: i
  };
- t(x);
- const h = {
+ t(h);
+ const x = {
  kernelName: "GeluGrad",
  backendName: "tensorflow",
  kernelFunc: i
  };
- t(h);
+ t(x);
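
The GeluGrad body above is the derivative of the tanh approximation of GELU. A minimal deminified sketch, assuming the public tf.js API (the constants are taken verbatim from the diff; names are mine):

```js
import * as tf from "@tensorflow/tfjs"; // sketch only; the package uses its own bundled chunks

const SQRT_2_OVER_PI = 0.7978845608028654; // "o" in the minified code
const COEFF = 0.044715;                    // "c" in the minified code

// Hypothetical deminified view of the GeluGrad kernelFunc above.
function dGelu(dy, x) {
  return tf.tidy(() => {
    const x2 = x.square();
    const tanhArg = x.add(x2.mul(x).mul(COEFF)).mul(SQRT_2_OVER_PI).tanh();
    const sech2 = tanhArg.square().neg().add(1);   // 1 - tanh^2
    const poly = x2.mul(3 * COEFF).add(1);         // 1 + 3c*x^2
    const left = tanhArg.add(1).mul(0.5);          // 0.5 * (1 + tanh)
    const right = x.mul(sech2).mul(SQRT_2_OVER_PI).mul(poly).mul(0.5);
    return dy.mul(left.add(right));
  });
}
```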
package/dist/ops/cpu/matMul16.js CHANGED
@@ -1,6 +1,6 @@
  import { isPackedTensor as t } from "../../utilities/packed.js";
- import { p } from "../../index-DOvlwCh-.js";
- import { m as l } from "../../mat_mul-SjpJRLyL.js";
+ import { h as p } from "../../index-D0RBWjq8.js";
+ import { m as l } from "../../mat_mul-DxpNTCRz.js";
  function m(e) {
  const { A: n, B: r } = e.inputs, { transposeA: o, transposeB: s } = e.attrs, a = !t(n), c = !t(r);
  if (a && c)
package/dist/ops/cpu/matMulGelu.js CHANGED
@@ -1,6 +1,6 @@
- import { p as e, t as m } from "../../index-DOvlwCh-.js";
- import { g as M, d as i } from "../../gelu-CjNPL4OH.js";
- import { m as k } from "../../mat_mul-SjpJRLyL.js";
+ import { h as e, t as m } from "../../index-D0RBWjq8.js";
+ import { g as M, d as i } from "../../gelu-DqTbCx5x.js";
+ import { m as k } from "../../mat_mul-DxpNTCRz.js";
  function c(t) {
  const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
  return m(() => {
package/dist/ops/cpu/matMulMul.js CHANGED
@@ -1,4 +1,4 @@
- import { p as e, t as i } from "../../index-DOvlwCh-.js";
+ import { h as e, t as i } from "../../index-D0RBWjq8.js";
  function n(t) {
  const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
  return i(() => m.matMul(k, o, s).mul(M));
package/dist/ops/cpu/mulDropout.js CHANGED
@@ -1,4 +1,4 @@
- import { p as e, m as t } from "../../index-DOvlwCh-.js";
+ import { h as e, m as t } from "../../index-D0RBWjq8.js";
  function n(o) {
  const { inputs: r } = o, { a: l, b: u } = r;
  return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
package/dist/ops/cpu/normRMS.js CHANGED
@@ -1,4 +1,4 @@
- import { p as o, t as d } from "../../index-DOvlwCh-.js";
+ import { h as o, t as d } from "../../index-D0RBWjq8.js";
  function i(t) {
  const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
  return d(() => {
@@ -31,9 +31,9 @@ const S = {
  kernelFunc: N
  };
  o(S);
- const p = {
+ const R = {
  kernelName: "RMSNormGrad",
  backendName: "tensorflow",
  kernelFunc: N
  };
- o(p);
+ o(R);
package/dist/ops/cpu/qkv.js CHANGED
@@ -1,6 +1,6 @@
- import { p as q } from "../../index-DOvlwCh-.js";
- import { r as o } from "../../reshape-ByE68wS9.js";
- import { s as x } from "../../split-CVLc0w--.js";
+ import { h as q } from "../../index-D0RBWjq8.js";
+ import { r as o } from "../../reshape-CkjKPPqB.js";
+ import { s as x } from "../../split-BNz5jcGc.js";
  function v(p) {
  const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
  if (C)
package/dist/ops/cpu/rope.js CHANGED
@@ -1,20 +1,20 @@
- import { p as I } from "../../index-DOvlwCh-.js";
- import { r as y } from "../../range-BklejeeW.js";
- import { g as F } from "../../gather-D3JcZUaI.js";
- import { s as E } from "../../stack-Cf4n9h0N.js";
- import { c as T } from "../../concat-BV8bt5H-.js";
+ import { h as I } from "../../index-D0RBWjq8.js";
+ import { r as y } from "../../range-CuGvVN2c.js";
+ import { g as F } from "../../gather-BcO5UQNJ.js";
+ import { s as E } from "../../stack-WJK22CFn.js";
+ import { c as T } from "../../concat-C6X3AAlQ.js";
  function U(c, r, p, e, n) {
  const t = e.shape[3], s = p;
  if (s > t) return e;
- const o = e.shape[2], i = s / 2, a = r.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), d = c.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), l = e.shape[0], m = e.shape[1], h = y(0, s, 2, "int32"), g = y(1, s, 2, "int32"), D = ((k) => {
- const C = k.slice([0, 0, 0, 0], [l, m, o, s]), R = s < t ? k.slice([0, 0, 0, s], [l, m, o, t - s]) : null, u = F(C, h, 3), f = F(C, g, 3), v = u.mul(a), N = f.mul(d), S = v.sub(N), P = f.mul(a), b = u.mul(d), x = P.add(b);
- u.dispose(), f.dispose(), a.dispose(), d.dispose(), v.dispose(), N.dispose(), P.dispose(), b.dispose();
+ const o = e.shape[2], i = s / 2, a = r.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), d = c.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), l = e.shape[0], m = e.shape[1], f = y(0, s, 2, "int32"), g = y(1, s, 2, "int32"), D = ((k) => {
+ const C = k.slice([0, 0, 0, 0], [l, m, o, s]), R = s < t ? k.slice([0, 0, 0, s], [l, m, o, t - s]) : null, u = F(C, f, 3), h = F(C, g, 3), v = u.mul(a), N = h.mul(d), S = v.sub(N), P = h.mul(a), b = u.mul(d), x = P.add(b);
+ u.dispose(), h.dispose(), a.dispose(), d.dispose(), v.dispose(), N.dispose(), P.dispose(), b.dispose();
  const K = E([S, x], -1);
  S.dispose(), x.dispose();
  const w = K.reshape([l, m, o, s]);
  return K.dispose(), R ? T([w, R], 3) : w;
  })(e);
- return h.dispose(), g.dispose(), D;
+ return f.dispose(), g.dispose(), D;
  }
  function B(c) {
  const { x: r } = c.inputs, { pastLen: p, negSin: e, ropeCache: n } = c.attrs, t = r.shape[3], s = e ? n.getNegSin() : n.getSin(), o = n.getCos();
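
At its core the cpu rope kernel above applies the standard RoPE pairwise rotation to interleaved even/odd feature indices. A minimal sketch of that rotation, assuming the public tf.js API (cos and sin stand for the sliced cache views the minified code calls a and d; names are mine):

```js
import * as tf from "@tensorflow/tfjs"; // sketch only

// Hypothetical deminified view of the rotation inside the kernel above.
function ropeRotatePairs(xEven, xOdd, cos, sin) {
  const rotEven = xEven.mul(cos).sub(xOdd.mul(sin)); // x_e*cos - x_o*sin ("S")
  const rotOdd = xOdd.mul(cos).add(xEven.mul(sin));  // x_o*cos + x_e*sin ("x")
  // Stack on a new trailing axis, then reshape to re-interleave, as the
  // kernel does with E([S, x], -1) followed by reshape.
  return tf.stack([rotEven, rotOdd], -1);
}
```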
package/dist/ops/cpu/scatterSub.js CHANGED
@@ -1,8 +1,8 @@
- import { A as f, C as g, B as r, E as l, ai as N, p as b, c as S, m as h } from "../../index-DOvlwCh-.js";
- import { v as D } from "../../scatter_nd_util-lvSiX8q4.js";
- import { r as k } from "../../range-BklejeeW.js";
- import { s as v } from "../../stack-Cf4n9h0N.js";
- import { o as E } from "../../ones-D2rT0xk2.js";
+ import { q as f, w as g, u as r, E as l, ai as N, h as b, c as S, m as h } from "../../index-D0RBWjq8.js";
+ import { v as D } from "../../scatter_nd_util-BY22Cc-C.js";
+ import { r as k } from "../../range-CuGvVN2c.js";
+ import { s as v } from "../../stack-WJK22CFn.js";
+ import { o as E } from "../../ones-BX_wEgzB.js";
  function I(a, e, s) {
  g(s);
  const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
@@ -10,14 +10,14 @@ function I(a, e, s) {
  const c = { indices: n, updates: t }, o = { shape: s };
  return l.runKernel(N, c, o);
  }
- const C = /* @__PURE__ */ f({ scatterND_: I });
- function K(a) {
- const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = C(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
+ const K = /* @__PURE__ */ f({ scatterND_: I });
+ function L(a) {
+ const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = K(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
  return h(p, m);
  }
- const L = {
+ const T = {
  kernelName: "EfficientScatterSub",
  backendName: "cpu",
- kernelFunc: K
+ kernelFunc: L
  };
- b(L);
+ b(T);
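
Likewise, a minimal deminified sketch of the cpu EfficientScatterSub kernel above, assuming the public tf.js API (names are mine): it scatters ones into a one-hot matrix at [i, labels[i]], subtracts it from the logits, and scales by the reshaped upstream gradient.

```js
import * as tf from "@tensorflow/tfjs"; // sketch only

// Hypothetical deminified view of the kernelFunc above, not the shipped code.
function efficientScatterSub({ logits, labels, dy }) {
  const batch = labels.shape[0];
  const classes = logits.shape[1];
  const rows = tf.range(0, batch, 1, "int32");
  const indices = tf.stack([rows, labels], 1);  // pairs [i, labels[i]]
  const oneHot = tf.scatterND(indices, tf.ones([batch]), [batch, classes]);
  return tf.sub(logits, oneHot).mul(dy.reshape([batch, 1]));
}
```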
package/dist/ops/dot16.js CHANGED
@@ -1,8 +1,8 @@
- import { b as d } from "../matMul16-BWRSOCWB.js";
+ import { b as d } from "../matMul16-cDxwemKj.js";
  import { transpose16 as w } from "./transpose16.js";
  import { reshape16 as n } from "./reshape16.js";
  import { isPackedTensor as p } from "../utilities/packed.js";
- import { d as x } from "../tfjs_backend-y1cvNhLA.js";
+ import { d as x } from "../tfjs_backend-Vi4JfLzT.js";
  function E(e, s, h = !1, c = !1) {
  if (!p(e) && !p(s))
  return x(e, s);
package/dist/ops/gatherSub.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-DOvlwCh-.js";
+ import { e as n } from "../index-D0RBWjq8.js";
  import "./cpu/gatherSub.js";
  import "./webgl/gatherSub.js";
  function f(r, e, t) {
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
- import "../index-DOvlwCh-.js";
+ import "../index-D0RBWjq8.js";
  import "./cpu/gelu.js";
  import "./webgl/gelu.js";
- import { d as e, g as i } from "../gelu-CjNPL4OH.js";
+ import { d as e, g as i } from "../gelu-DqTbCx5x.js";
  export {
  e as dGelu,
  i as gelu
package/dist/ops/grads/add16.js CHANGED
@@ -1,11 +1,11 @@
- import { u as i, a3 as h, a4 as d } from "../../index-DOvlwCh-.js";
+ import { j as u, $ as h, a0 as d } from "../../index-D0RBWjq8.js";
  import { sum16 as c } from "../sum16.js";
  import { reshape16 as p } from "../reshape16.js";
  const A = {
  kernelName: "Add16",
  inputsToSave: ["a", "b"],
- gradFunc: (s, u) => {
- const [t, a] = u, n = h(t.shape, a.shape);
+ gradFunc: (s, i) => {
+ const [t, a] = i, n = h(t.shape, a.shape);
  if (Array.isArray(s))
  throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
  return { a: () => {
@@ -23,4 +23,4 @@ const A = {
  } };
  }
  };
- i(A);
+ u(A);
package/dist/ops/grads/attentionMask.js CHANGED
@@ -1,5 +1,5 @@
- import { u as m } from "../../index-DOvlwCh-.js";
- import { m as o } from "../../matMul16-BWRSOCWB.js";
+ import { j as m } from "../../index-D0RBWjq8.js";
+ import { m as o } from "../../matMul16-cDxwemKj.js";
  import { transpose16 as c } from "../transpose16.js";
  const l = {
  kernelName: "AttentionMask",
package/dist/ops/grads/gelu.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as e } from "../../gelu-CjNPL4OH.js";
+ import "../../index-D0RBWjq8.js";
+ import { a as e } from "../../gelu-DqTbCx5x.js";
  export {
  e as geluGradConfig
  };
package/dist/ops/grads/matMul16.js CHANGED
@@ -1,6 +1,6 @@
- import "../../index-DOvlwCh-.js";
- import { a } from "../../matMul16-BWRSOCWB.js";
- import "../../gelu-CjNPL4OH.js";
+ import "../../index-D0RBWjq8.js";
+ import { a } from "../../matMul16-cDxwemKj.js";
+ import "../../gelu-DqTbCx5x.js";
  import "../transpose16.js";
  import "../reshape16.js";
  export {
package/dist/ops/grads/matMulGelu.js CHANGED
@@ -1,4 +1,4 @@
- import { u as a, e as o } from "../../index-DOvlwCh-.js";
+ import { j as a, e as o } from "../../index-D0RBWjq8.js";
  function s(e, n, r) {
  return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
  }
@@ -7,9 +7,9 @@ const d = {
  inputsToSave: ["x", "kernel"],
  outputsToSave: [],
  gradFunc: (e, n) => {
- const [r, u] = n, [t, l] = s(e, r, u);
+ const [r, t] = n, [u, l] = s(e, r, t);
  return {
- x: () => t,
+ x: () => u,
  kernel: () => l
  };
  }
package/dist/ops/grads/normRMS.js CHANGED
@@ -1,20 +1,20 @@
- import { u as t, e as u } from "../../index-DOvlwCh-.js";
- function g(r, a, n) {
- return u().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
+ import { j as t, e as g } from "../../index-D0RBWjq8.js";
+ function s(r, a, n) {
+ return g().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
  }
- const s = {
+ const u = {
  kernelName: "RMSNorm",
  inputsToSave: ["x", "gamma"],
  outputsToSave: [],
  gradFunc: (r, a) => {
- const [n, e] = a, [m, o] = g(r, n, e);
+ const [n, e] = a, [m, o] = s(r, n, e);
  return {
  x: () => m,
  gamma: () => o
  };
  }
  };
- t(s);
+ t(u);
  export {
- s as normRMSGradConfig
+ u as normRMSGradConfig
  };
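
This file follows tf.js's registerGradient pattern; a minimal deminified reading of the config above, assuming the public tf.js API (names are mine, and RMSNormGrad is the backward kernel the diff shows being invoked):

```js
import * as tf from "@tensorflow/tfjs"; // sketch only

// Hypothetical deminified view of the gradient config registered above.
const normRMSGradConfig = {
  kernelName: "RMSNorm",
  inputsToSave: ["x", "gamma"],
  outputsToSave: [],
  gradFunc: (dy, saved) => {
    const [x, gamma] = saved;
    // One backward kernel call returns both input gradients.
    const [dx, dgamma] = tf.engine().runKernel("RMSNormGrad", { dy, x, gamma });
    return { x: () => dx, gamma: () => dgamma };
  },
};
tf.registerGradient(normRMSGradConfig);
```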
package/dist/ops/grads/pack16.js CHANGED
@@ -1,6 +1,6 @@
- import "../../index-DOvlwCh-.js";
- import { b as i } from "../../pack16-nQ6JaLo-.js";
- import "../../slice-BfEGSH82.js";
+ import "../../index-D0RBWjq8.js";
+ import { b as i } from "../../pack16-k4jq6aMX.js";
+ import "../../slice-Aqy7KbJh.js";
  export {
  i as packGradConfig
  };
package/dist/ops/grads/qkv.js CHANGED
@@ -1,8 +1,8 @@
- import { u as c } from "../../index-DOvlwCh-.js";
- import { a as f } from "../../matMul16-BWRSOCWB.js";
+ import { j as u } from "../../index-D0RBWjq8.js";
+ import { a as f } from "../../matMul16-cDxwemKj.js";
  import { concat16 as g } from "../concat16.js";
  import { sum16 as l } from "../sum16.js";
- import { s as k } from "../../squeeze-C7Z2srUo.js";
+ import { s as k } from "../../squeeze--YMgaAAf.js";
  const i = {
  kernelName: "QKV",
  inputsToSave: ["x", "kernel"],
@@ -19,8 +19,8 @@ const i = {
  return p.dispose(), {
  x: () => d.A(),
  kernel: () => {
- const o = d.B(), u = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
- return o.dispose(), u;
+ const o = d.B(), c = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
+ return o.dispose(), c;
  }
  };
  }
@@ -28,7 +28,7 @@ const i = {
  function B(e, s, r) {
  return i.gradFunc(e, [s, r], {});
  }
- c(i);
+ u(i);
  export {
  B as qkvGrad
  };
package/dist/ops/grads/rope.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as p } from "../../rope-Ir4mTyD1.js";
+ import "../../index-D0RBWjq8.js";
+ import { a as p } from "../../rope-BmZmp9uP.js";
  export {
  p as ropeGradConfig
  };
package/dist/ops/grads/softmax16.js CHANGED
@@ -1,4 +1,4 @@
- import { u as n, e as a } from "../../index-DOvlwCh-.js";
+ import { j as n, e as a } from "../../index-D0RBWjq8.js";
  import { isPackedTensor as t } from "../../utilities/packed.js";
  function s(r, e) {
  return a().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
package/dist/ops/grads/unpack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as p } from "../../pack16-nQ6JaLo-.js";
+ import "../../index-D0RBWjq8.js";
+ import { a as p } from "../../pack16-k4jq6aMX.js";
  export {
  p as unpackGradConfig
  };
package/dist/ops/matMul16.js CHANGED
@@ -1,9 +1,9 @@
- import "../index-DOvlwCh-.js";
- import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-BWRSOCWB.js";
+ import "../index-D0RBWjq8.js";
+ import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-cDxwemKj.js";
  import "./webgl/matMul16.js";
  import "./cpu/matMul16.js";
  import "../utilities/packed.js";
- import "../pack16-nQ6JaLo-.js";
+ import "../pack16-k4jq6aMX.js";
  export {
  p as matMul16,
  u as matMul16Gelu,
package/dist/ops/matMulGelu.js CHANGED
@@ -1,6 +1,6 @@
- import { e as u } from "../index-DOvlwCh-.js";
+ import { e as u } from "../index-D0RBWjq8.js";
  import "./cpu/matMulGelu.js";
- import "../matMulGelu-CzfgT6Wq.js";
+ import "../matMulGelu-B2s_80-H.js";
  import "./grads/matMulGelu.js";
  function M(r, e) {
  return u().runKernel("MatMulGelu", { x: r, kernel: e });
package/dist/ops/matMulMul.js CHANGED
@@ -1,4 +1,4 @@
- import { e as u } from "../index-DOvlwCh-.js";
+ import { e as u } from "../index-D0RBWjq8.js";
  import "./cpu/matMulMul.js";
  import "./webgl/matMulMul.js";
  function m(e, r, t, l = !1, n = !1) {
package/dist/ops/mul16.js CHANGED
@@ -1,4 +1,4 @@
- import { m as t, e as u } from "../index-DOvlwCh-.js";
+ import { m as t, e as u } from "../index-D0RBWjq8.js";
  import { isPackedTensor as n } from "../utilities/packed.js";
  function i(r, e) {
  return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
package/dist/ops/mulDrop.js CHANGED
@@ -1,4 +1,4 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-D0RBWjq8.js";
  import "./cpu/mulDropout.js";
  import "./webgl/mulDropout.js";
  function m(r, o, e, n) {
package/dist/ops/normRMS.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-DOvlwCh-.js";
+ import { e as n } from "../index-D0RBWjq8.js";
  import "./cpu/normRMS.js";
  import "./webgl/normRMS.js";
  import "./grads/normRMS.js";
package/dist/ops/pack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../index-DOvlwCh-.js";
- import { p as a } from "../pack16-nQ6JaLo-.js";
+ import "../index-D0RBWjq8.js";
+ import { p as a } from "../pack16-k4jq6aMX.js";
  export {
  a as pack16
  };
package/dist/ops/qkv.js CHANGED
@@ -1,4 +1,4 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-D0RBWjq8.js";
  import "./cpu/qkv.js";
  import "./webgl/qkv.js";
  import "./grads/qkv.js";
package/dist/ops/reshape16.js CHANGED
@@ -1,5 +1,5 @@
- import { u as p, p as s, e as u } from "../index-DOvlwCh-.js";
- import { r as c } from "../reshape-ByE68wS9.js";
+ import { j as p, h as s, e as u } from "../index-D0RBWjq8.js";
+ import { r as c } from "../reshape-CkjKPPqB.js";
  const i = {
  kernelName: "Reshape16",
  inputsToSave: ["x"],
@@ -21,18 +21,18 @@ const l = {
  kernelFunc: a
  };
  s(l);
- const g = {
+ const h = {
  kernelName: "Reshape16",
  backendName: "webgl",
  kernelFunc: a
  };
- s(g);
- const h = {
+ s(h);
+ const g = {
  kernelName: "Reshape16",
  backendName: "cpu",
  kernelFunc: a
  };
- s(h);
+ s(g);
  function m(e, r) {
  return u().runKernel("Reshape16", { x: e }, { shape: r });
  }
package/dist/ops/rope.js CHANGED
@@ -1,7 +1,7 @@
- import "../index-DOvlwCh-.js";
+ import "../index-D0RBWjq8.js";
  import "./cpu/rope.js";
  import "./webgl/rope.js";
- import { r as i } from "../rope-Ir4mTyD1.js";
+ import { r as i } from "../rope-BmZmp9uP.js";
  export {
  i as rope
  };
package/dist/ops/scatterSub.js CHANGED
@@ -1,4 +1,4 @@
- import { e as i } from "../index-DOvlwCh-.js";
+ import { e as i } from "../index-D0RBWjq8.js";
  import "./cpu/scatterSub.js";
  import "./webgl/scatterSub.js";
  function c(t, r, e) {
package/dist/ops/slice16.js CHANGED
@@ -1,6 +1,6 @@
  import { isPackedTensor as n } from "../utilities/packed.js";
- import { e as c } from "../index-DOvlwCh-.js";
- import { s as i } from "../slice-BfEGSH82.js";
+ import { e as c } from "../index-D0RBWjq8.js";
+ import { s as i } from "../slice-Aqy7KbJh.js";
  function a(r, e, o) {
  return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
  }
package/dist/ops/softmax16.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-DOvlwCh-.js";
+ import { e as n } from "../index-D0RBWjq8.js";
  import "./grads/softmax16.js";
  import { isPackedTensor as e } from "../utilities/packed.js";
  function t(r) {
package/dist/ops/sub16.js CHANGED
@@ -1,4 +1,4 @@
- import { c as s, e as t } from "../index-DOvlwCh-.js";
+ import { c as s, e as t } from "../index-D0RBWjq8.js";
  import { isPackedTensor as n } from "../utilities/packed.js";
  function c(r, e) {
  return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
package/dist/ops/sum16.js CHANGED
@@ -1,6 +1,6 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-D0RBWjq8.js";
  import { isPackedTensor as s } from "../utilities/packed.js";
- import { s as n } from "../sum-DWAtNGez.js";
+ import { s as n } from "../sum-BdplSvq_.js";
  function p(r, o, e = !1) {
  if (!s(r))
  return n(r, o, e);
package/dist/ops/transpose16.js CHANGED
@@ -1,8 +1,8 @@
- import { u as i, p, e as u } from "../index-DOvlwCh-.js";
+ import { j as i, h as p, e as u } from "../index-D0RBWjq8.js";
  import { forcePacked as l, forceFloat as m } from "./grads/utils.js";
- import { g } from "../axis_util-BaG7mf5A.js";
+ import { g } from "../axis_util-DofAuy0p.js";
  import { isPackedTensor as f } from "../utilities/packed.js";
- import { t as a } from "../transpose-ClWiBS_b.js";
+ import { t as a } from "../transpose-JawVKyZy.js";
  const d = {
  kernelName: "Transpose16",
  gradFunc: (r, s, t) => {
package/dist/ops/unpack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../index-DOvlwCh-.js";
- import { u as t } from "../pack16-nQ6JaLo-.js";
+ import "../index-D0RBWjq8.js";
+ import { u as t } from "../pack16-k4jq6aMX.js";
  import "../utilities/packed.js";
  export {
  t as unpack16
package/dist/ops/webgl/adamAdjust.js CHANGED
@@ -1,5 +1,5 @@
- import { r as n } from "../../Reshape-Zt6eb7yh.js";
- import { p as f } from "../../index-DOvlwCh-.js";
+ import { r as n } from "../../Reshape-Ct266DEk.js";
+ import { h as f } from "../../index-D0RBWjq8.js";
  class v {
  variableNames = ["moments", "value"];
  outputShape;
package/dist/ops/webgl/adamMoments.js CHANGED
@@ -1,4 +1,4 @@
- import { p as m } from "../../index-DOvlwCh-.js";
+ import { h as m } from "../../index-D0RBWjq8.js";
  class i {
  variableNames = ["moments", "gradient"];
  outputShape;
package/dist/ops/webgl/appendCache.js CHANGED
@@ -1,4 +1,4 @@
- import { p } from "../../index-DOvlwCh-.js";
+ import { h as p } from "../../index-D0RBWjq8.js";
  class m {
  variableNames = ["cache", "item"];
  outputShape;
package/dist/ops/webgl/attentionMask.js CHANGED
@@ -1,5 +1,5 @@
- import { p as m } from "../../index-DOvlwCh-.js";
- class h {
+ import { h } from "../../index-D0RBWjq8.js";
+ class m {
  variableNames = ["q", "k"];
  outputShape;
  userCode;
@@ -34,7 +34,7 @@ class h {
  }
  }
  function l(o) {
- const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new h(i, u, r, c, p);
+ const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new m(i, u, r, c, p);
  return a.runWebGLProgram(d, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
  }
  const f = {
@@ -42,4 +42,4 @@ const f = {
  backendName: "webgl",
  kernelFunc: l
  };
- m(f);
+ h(f);
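
A hedged deminified reading of the webgl kernelFunc above (the shape order and the uniform values come straight from the diff; AttentionMaskProgram is my placeholder name for the minified program class, and runWebGLProgram is the backend-internal method the code itself calls):

```js
// Hypothetical deminified view of the kernelFunc above, not the shipped code.
function attentionMaskWebGL(args) {
  const { q, k } = args.inputs;
  const { divisor, pastLen } = args.attrs;
  const backend = args.backend; // the tf.js WebGL backend instance
  const batch = q.shape[0], heads = q.shape[1], qLen = q.shape[2], dim = q.shape[3];
  const kLen = k.shape[2];
  const program = new AttentionMaskProgram(batch, heads, qLen, kLen, dim);
  // Custom uniforms: the scale divisor, the past-KV length, and the
  // fill value used for masked positions (-Infinity).
  return backend.runWebGLProgram(program, [q, k], "float32",
    [[divisor], [pastLen], [Number.NEGATIVE_INFINITY]]);
}
```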