@genai-fi/nanogpt 0.10.3 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. package/dist/Generator.d.ts +10 -5
  2. package/dist/Generator.js +1789 -1765
  3. package/dist/{RealDiv-KAPDe8zB.js → RealDiv-C8neBwFi.js} +15 -15
  4. package/dist/{Reshape-BYkmUnAv.js → Reshape-Bd4V_4X7.js} +1 -1
  5. package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ck29jQSY.js} +5 -5
  6. package/dist/TeachableLLM.d.ts +5 -3
  7. package/dist/TeachableLLM.js +14 -14
  8. package/dist/Trainer.d.ts +3 -1
  9. package/dist/Trainer.js +11 -8
  10. package/dist/{axis_util-BaG7mf5A.js → axis_util-DGqbT-FX.js} +3 -3
  11. package/dist/backend.js +2 -2
  12. package/dist/{backend_util-RCe-rHaj.js → backend_util-DC3rBo_H.js} +18 -18
  13. package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-mbhNnlx9.js} +3 -3
  14. package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
  15. package/dist/checks/appendCache.js +2 -2
  16. package/dist/checks/attentionMask.js +3 -3
  17. package/dist/checks/gelu.js +2 -2
  18. package/dist/checks/matMulGelu.js +2 -2
  19. package/dist/checks/normRMS.js +4 -4
  20. package/dist/checks/normRMSGrad.js +3 -3
  21. package/dist/checks/packUnpack.js +2 -2
  22. package/dist/checks/qkv.js +4 -4
  23. package/dist/checks/rope.js +2 -2
  24. package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-fg2aKzUy.js} +5 -5
  25. package/dist/complex-Cyg-eQeZ.js +11 -0
  26. package/dist/concat-CSm2rMwe.js +17 -0
  27. package/dist/{concat_util-DpW8mL_l.js → concat_util-D0je5Ppu.js} +1 -1
  28. package/dist/{dataset-BcwmTGYc.js → dataset-CVIJu7Xa.js} +7 -7
  29. package/dist/{dropout-BcvN9JYi.js → dropout-DLhSMNTZ.js} +9 -9
  30. package/dist/expand_dims-ChkuOp6I.js +11 -0
  31. package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-1KWPiStI.js} +1 -1
  32. package/dist/{floor-D5QdR_le.js → floor-BRMPgeIs.js} +1 -1
  33. package/dist/{gather-D3JcZUaI.js → gather-BSULDalH.js} +1 -1
  34. package/dist/{gelu-CjNPL4OH.js → gelu-BK1k-n1i.js} +1 -1
  35. package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-BJSTk_mW.js} +25 -25
  36. package/dist/{index-BwexR4lA.js → index-BBVLAXZD.js} +89 -89
  37. package/dist/{index-DOvlwCh-.js → index-Duu1Lvvv.js} +53 -53
  38. package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-BtYrPoJu.js} +6 -6
  39. package/dist/layers/BaseLayer.js +2 -2
  40. package/dist/layers/CausalSelfAttention.js +6 -6
  41. package/dist/layers/MLP.js +4 -4
  42. package/dist/layers/PositionEmbedding.js +5 -5
  43. package/dist/layers/RMSNorm.js +3 -3
  44. package/dist/layers/RoPECache.js +4 -4
  45. package/dist/layers/TiedEmbedding.js +6 -6
  46. package/dist/layers/TransformerBlock.js +1 -1
  47. package/dist/loader/loadTransformers.js +1 -1
  48. package/dist/loader/oldZipLoad.js +9 -9
  49. package/dist/log_sum_exp-CVqLsVLl.js +39 -0
  50. package/dist/main.d.ts +10 -1
  51. package/dist/main.js +68 -58
  52. package/dist/{matMul16-BWRSOCWB.js → matMul16-xswmhSuF.js} +3 -3
  53. package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-BpvgnYG8.js} +14 -14
  54. package/dist/mat_mul-Bn2BDpT4.js +11 -0
  55. package/dist/{mod-AnXEvvpo.js → mod-B4AUd1Np.js} +1 -1
  56. package/dist/models/NanoGPTV1.js +2 -2
  57. package/dist/models/model.js +9 -9
  58. package/dist/{ones-D2rT0xk2.js → ones-CBI1AQjb.js} +3 -3
  59. package/dist/ops/adamAdjust.js +1 -1
  60. package/dist/ops/adamMoments.js +1 -1
  61. package/dist/ops/add16.js +1 -1
  62. package/dist/ops/appendCache.js +3 -3
  63. package/dist/ops/attentionMask.js +1 -1
  64. package/dist/ops/concat16.js +2 -2
  65. package/dist/ops/cpu/adamAdjust.js +9 -9
  66. package/dist/ops/cpu/adamMoments.js +5 -5
  67. package/dist/ops/cpu/appendCache.js +6 -6
  68. package/dist/ops/cpu/attentionMask.js +10 -10
  69. package/dist/ops/cpu/fusedSoftmax.js +5 -5
  70. package/dist/ops/cpu/gatherSub.js +9 -9
  71. package/dist/ops/cpu/gelu.js +5 -5
  72. package/dist/ops/cpu/matMul16.js +2 -2
  73. package/dist/ops/cpu/matMulGelu.js +3 -3
  74. package/dist/ops/cpu/matMulMul.js +5 -5
  75. package/dist/ops/cpu/mulDropout.js +1 -1
  76. package/dist/ops/cpu/normRMS.js +7 -7
  77. package/dist/ops/cpu/qkv.js +3 -3
  78. package/dist/ops/cpu/rope.js +5 -5
  79. package/dist/ops/cpu/scatterSub.js +11 -11
  80. package/dist/ops/dot16.js +2 -2
  81. package/dist/ops/gatherSub.js +1 -1
  82. package/dist/ops/gelu.js +2 -2
  83. package/dist/ops/grads/add16.js +4 -4
  84. package/dist/ops/grads/attentionMask.js +2 -2
  85. package/dist/ops/grads/gelu.js +2 -2
  86. package/dist/ops/grads/matMul16.js +3 -3
  87. package/dist/ops/grads/matMulGelu.js +6 -6
  88. package/dist/ops/grads/normRMS.js +4 -4
  89. package/dist/ops/grads/pack16.js +3 -3
  90. package/dist/ops/grads/qkv.js +10 -10
  91. package/dist/ops/grads/rope.js +2 -2
  92. package/dist/ops/grads/softmax16.js +1 -1
  93. package/dist/ops/grads/unpack16.js +2 -2
  94. package/dist/ops/matMul16.js +3 -3
  95. package/dist/ops/matMulGelu.js +2 -2
  96. package/dist/ops/matMulMul.js +1 -1
  97. package/dist/ops/mul16.js +1 -1
  98. package/dist/ops/mulDrop.js +1 -1
  99. package/dist/ops/normRMS.js +1 -1
  100. package/dist/ops/pack16.js +2 -2
  101. package/dist/ops/qkv.js +1 -1
  102. package/dist/ops/reshape16.js +2 -2
  103. package/dist/ops/rope.js +2 -2
  104. package/dist/ops/scatterSub.js +1 -1
  105. package/dist/ops/slice16.js +2 -2
  106. package/dist/ops/softmax16.js +1 -1
  107. package/dist/ops/sub16.js +1 -1
  108. package/dist/ops/sum16.js +2 -2
  109. package/dist/ops/transpose16.js +6 -6
  110. package/dist/ops/unpack16.js +2 -2
  111. package/dist/ops/webgl/adamAdjust.js +2 -2
  112. package/dist/ops/webgl/adamMoments.js +1 -1
  113. package/dist/ops/webgl/appendCache.js +1 -1
  114. package/dist/ops/webgl/attentionMask.js +1 -1
  115. package/dist/ops/webgl/fusedSoftmax.js +4 -4
  116. package/dist/ops/webgl/gatherSub.js +1 -1
  117. package/dist/ops/webgl/gelu.js +2 -2
  118. package/dist/ops/webgl/log.js +3 -3
  119. package/dist/ops/webgl/matMul16.js +8 -8
  120. package/dist/ops/webgl/matMulGelu.js +4 -4
  121. package/dist/ops/webgl/matMulMul.js +7 -7
  122. package/dist/ops/webgl/mulDropout.js +1 -1
  123. package/dist/ops/webgl/normRMS.js +7 -7
  124. package/dist/ops/webgl/qkv.js +1 -1
  125. package/dist/ops/webgl/rope.js +1 -1
  126. package/dist/ops/webgl/scatterSub.js +1 -1
  127. package/dist/ops/webgpu/adamAdjust.js +3 -3
  128. package/dist/ops/webgpu/adamMoments.js +5 -5
  129. package/dist/ops/webgpu/add16.js +1 -1
  130. package/dist/ops/webgpu/appendCache.js +3 -3
  131. package/dist/ops/webgpu/attentionMask.js +2 -2
  132. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  133. package/dist/ops/webgpu/concat16.js +5 -5
  134. package/dist/ops/webgpu/gatherSub.js +5 -5
  135. package/dist/ops/webgpu/gelu.js +3 -3
  136. package/dist/ops/webgpu/matMul16.js +19 -19
  137. package/dist/ops/webgpu/matMul16_program.js +2 -2
  138. package/dist/ops/webgpu/mul16.js +4 -4
  139. package/dist/ops/webgpu/normRMS.js +6 -6
  140. package/dist/ops/webgpu/normRMSGrad.js +4 -4
  141. package/dist/ops/webgpu/pack16.js +3 -3
  142. package/dist/ops/webgpu/pack16_program.js +2 -2
  143. package/dist/ops/webgpu/qkv.js +8 -8
  144. package/dist/ops/webgpu/rope.js +3 -3
  145. package/dist/ops/webgpu/scatterSub.js +3 -3
  146. package/dist/ops/webgpu/slice16.js +4 -4
  147. package/dist/ops/webgpu/softmax16.js +4 -4
  148. package/dist/ops/webgpu/softmax16_program.js +2 -2
  149. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  150. package/dist/ops/webgpu/softmax16grad.js +1 -1
  151. package/dist/ops/webgpu/sub16.js +4 -4
  152. package/dist/ops/webgpu/sum16.js +5 -5
  153. package/dist/ops/webgpu/transpose16.js +2 -2
  154. package/dist/ops/webgpu/transpose16_program.js +2 -2
  155. package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
  156. package/dist/ops/webgpu/unpack16.js +5 -5
  157. package/dist/ops/webgpu/utils/binary_op.js +3 -3
  158. package/dist/ops/webgpu/utils/reductions.js +4 -4
  159. package/dist/{ops-B5yanEdW.js → ops-C2_OXuZ4.js} +69 -69
  160. package/dist/{pack16-nQ6JaLo-.js → pack16-atD0eYRm.js} +9 -9
  161. package/dist/patches/webgpu_backend.js +6 -6
  162. package/dist/patches/webgpu_base.js +1 -1
  163. package/dist/patches/webgpu_program.js +8 -8
  164. package/dist/{random_width-or-CEftb.js → random_width-BN4wGJaW.js} +33 -33
  165. package/dist/range-DKmP1-OQ.js +10 -0
  166. package/dist/relu-BsXmGzzu.js +9 -0
  167. package/dist/{reshape-ByE68wS9.js → reshape-BI0yzp1T.js} +1 -1
  168. package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-BA_BX-ub.js} +26 -26
  169. package/dist/{rope-Ir4mTyD1.js → rope-DJ7Y7c-u.js} +1 -1
  170. package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-k9MUVUkn.js} +1 -1
  171. package/dist/{selu_util-kbhpTdYD.js → selu_util-DyW0X1WG.js} +5 -5
  172. package/dist/{shared-DT1TkE6w.js → shared-Q3BS6T03.js} +1 -1
  173. package/dist/{shared-dntlHIDQ.js → shared-nnSWpC3u.js} +86 -86
  174. package/dist/{slice-BfEGSH82.js → slice-wBNvzVyz.js} +1 -1
  175. package/dist/{slice_util-uTKwiEpW.js → slice_util-zN8KFC5I.js} +1 -1
  176. package/dist/{softmax-CA5jFsLR.js → softmax-DfuYyjMh.js} +1 -1
  177. package/dist/split-BYrLboMq.js +9 -0
  178. package/dist/squeeze-Bk8Brcct.js +10 -0
  179. package/dist/{stack-Cf4n9h0N.js → stack-CDWShFHF.js} +1 -1
  180. package/dist/{step-CINUs5QB.js → step-BS5JXRR6.js} +23 -23
  181. package/dist/{sum-DWAtNGez.js → sum-BPUfDB2X.js} +3 -3
  182. package/dist/tensor-CEt9Nm2s.js +8 -0
  183. package/dist/tensor1d-Cc_KCIDg.js +11 -0
  184. package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN97fF71.js} +3 -3
  185. package/dist/{tensor4d-BARPdTaS.js → tensor4d-vuDDgdUI.js} +1 -1
  186. package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-806hyYve.js} +49 -49
  187. package/dist/{tile-mbfagpsB.js → tile-OWUvpIVt.js} +3 -3
  188. package/dist/tokeniser/BaseTokeniser.d.ts +25 -0
  189. package/dist/tokeniser/BaseTokeniser.js +94 -0
  190. package/dist/tokeniser/CharTokeniser.d.ts +10 -9
  191. package/dist/tokeniser/CharTokeniser.js +44 -30
  192. package/dist/tokeniser/bpe.d.ts +10 -9
  193. package/dist/tokeniser/bpe.js +67 -52
  194. package/dist/tokeniser/type.d.ts +14 -5
  195. package/dist/training/Adam.js +2 -2
  196. package/dist/training/AdamExt.js +1 -1
  197. package/dist/training/DatasetBuilder.d.ts +3 -3
  198. package/dist/training/DatasetBuilder.js +34 -38
  199. package/dist/training/FullTrainer.js +1 -1
  200. package/dist/training/Trainer.d.ts +4 -3
  201. package/dist/training/Trainer.js +22 -25
  202. package/dist/training/sparseCrossEntropy.js +3 -3
  203. package/dist/training/tasks/ConversationTask.d.ts +11 -0
  204. package/dist/training/tasks/ConversationTask.js +26 -0
  205. package/dist/training/tasks/PretrainingTask.d.ts +11 -0
  206. package/dist/training/tasks/PretrainingTask.js +34 -0
  207. package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
  208. package/dist/training/tasks/StartSentenceTask.js +42 -0
  209. package/dist/training/tasks/Task.d.ts +8 -0
  210. package/dist/training/tasks/Task.js +41 -0
  211. package/dist/{transpose-ClWiBS_b.js → transpose-BUkQCJp9.js} +6 -6
  212. package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-BljxHhCY.js} +5 -5
  213. package/dist/utilities/dummy.js +3 -3
  214. package/dist/utilities/multinomialCPU.js +2 -2
  215. package/dist/utilities/packed.js +1 -1
  216. package/dist/utilities/performance.js +1 -1
  217. package/dist/utilities/profile.js +1 -1
  218. package/dist/utilities/safetensors.js +2 -2
  219. package/dist/utilities/sentences.d.ts +1 -1
  220. package/dist/utilities/sentences.js +11 -11
  221. package/dist/utilities/weights.js +2 -2
  222. package/dist/{variable-WawDEaAb.js → variable-DPt_Iuog.js} +1 -1
  223. package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-BpWRlghH.js} +3 -3
  224. package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-DMiKzzQM.js} +7 -7
  225. package/dist/{zeros-KnWaWf-X.js → zeros-5YROwwUH.js} +2 -2
  226. package/dist/{zeros_like-DvE73F4e.js → zeros_like-De4n1C3m.js} +71 -71
  227. package/package.json +1 -1
  228. package/dist/complex-DjxcVmoX.js +0 -11
  229. package/dist/concat-BV8bt5H-.js +0 -17
  230. package/dist/expand_dims-DT4tEPwA.js +0 -11
  231. package/dist/log_sum_exp-ngO0-4pK.js +0 -39
  232. package/dist/mat_mul-SjpJRLyL.js +0 -11
  233. package/dist/range-BklejeeW.js +0 -10
  234. package/dist/relu-CP0ZcxWO.js +0 -9
  235. package/dist/split-CVLc0w--.js +0 -9
  236. package/dist/squeeze-C7Z2srUo.js +0 -10
  237. package/dist/tensor-DJoc7gJU.js +0 -8
  238. package/dist/tensor1d-D11P_7Dp.js +0 -11
package/dist/ops/grads/gelu.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as e } from "../../gelu-CjNPL4OH.js";
+ import "../../index-Duu1Lvvv.js";
+ import { a as e } from "../../gelu-BK1k-n1i.js";
  export {
  e as geluGradConfig
  };
package/dist/ops/grads/matMul16.js CHANGED
@@ -1,6 +1,6 @@
- import "../../index-DOvlwCh-.js";
- import { a } from "../../matMul16-BWRSOCWB.js";
- import "../../gelu-CjNPL4OH.js";
+ import "../../index-Duu1Lvvv.js";
+ import { a } from "../../matMul16-xswmhSuF.js";
+ import "../../gelu-BK1k-n1i.js";
  import "../transpose16.js";
  import "../reshape16.js";
  export {
package/dist/ops/grads/matMulGelu.js CHANGED
@@ -1,17 +1,17 @@
- import { u as a, e as o } from "../../index-DOvlwCh-.js";
- function s(e, n, r) {
+ import { i as a, e as o } from "../../index-Duu1Lvvv.js";
+ function i(e, n, r) {
  return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
  }
- const d = {
+ const s = {
  kernelName: "MatMulGelu",
  inputsToSave: ["x", "kernel"],
  outputsToSave: [],
  gradFunc: (e, n) => {
- const [r, u] = n, [t, l] = s(e, r, u);
+ const [r, t] = n, [u, l] = i(e, r, t);
  return {
- x: () => t,
+ x: () => u,
  kernel: () => l
  };
  }
  };
- a(d);
+ a(s);
package/dist/ops/grads/normRMS.js CHANGED
@@ -1,13 +1,13 @@
- import { u as t, e as u } from "../../index-DOvlwCh-.js";
- function g(r, a, n) {
- return u().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
+ import { i as t, e as g } from "../../index-Duu1Lvvv.js";
+ function i(r, a, n) {
+ return g().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
  }
  const s = {
  kernelName: "RMSNorm",
  inputsToSave: ["x", "gamma"],
  outputsToSave: [],
  gradFunc: (r, a) => {
- const [n, e] = a, [m, o] = g(r, n, e);
+ const [n, e] = a, [m, o] = i(r, n, e);
  return {
  x: () => m,
  gamma: () => o
package/dist/ops/grads/pack16.js CHANGED
@@ -1,6 +1,6 @@
- import "../../index-DOvlwCh-.js";
- import { b as i } from "../../pack16-nQ6JaLo-.js";
- import "../../slice-BfEGSH82.js";
+ import "../../index-Duu1Lvvv.js";
+ import { b as i } from "../../pack16-atD0eYRm.js";
+ import "../../slice-wBNvzVyz.js";
  export {
  i as packGradConfig
  };
package/dist/ops/grads/qkv.js CHANGED
@@ -1,34 +1,34 @@
- import { u as c } from "../../index-DOvlwCh-.js";
- import { a as f } from "../../matMul16-BWRSOCWB.js";
+ import { i as u } from "../../index-Duu1Lvvv.js";
+ import { a as f } from "../../matMul16-xswmhSuF.js";
  import { concat16 as g } from "../concat16.js";
  import { sum16 as l } from "../sum16.js";
- import { s as k } from "../../squeeze-C7Z2srUo.js";
- const i = {
+ import { s as k } from "../../squeeze-Bk8Brcct.js";
+ const d = {
  kernelName: "QKV",
  inputsToSave: ["x", "kernel"],
  outputsToSave: [],
  gradFunc: (e, s) => {
  const [r, n, t] = e, [a] = s, p = g([r, n, t], 1);
  r.dispose(), n.dispose(), t.dispose();
- const m = [a.shape[0], a.shape[1], 3 * a.shape[2]], d = f.gradFunc(p, s, {
+ const m = [a.shape[0], a.shape[1], 3 * a.shape[2]], i = f.gradFunc(p, s, {
  transposeA: !1,
  transposeB: !1,
  originalShape: m,
  perm: [0, 2, 1, 3]
  });
  return p.dispose(), {
- x: () => d.A(),
+ x: () => i.A(),
  kernel: () => {
- const o = d.B(), u = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
- return o.dispose(), u;
+ const o = i.B(), c = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
+ return o.dispose(), c;
  }
  };
  }
  };
  function B(e, s, r) {
- return i.gradFunc(e, [s, r], {});
+ return d.gradFunc(e, [s, r], {});
  }
- c(i);
+ u(d);
  export {
  B as qkvGrad
  };
package/dist/ops/grads/rope.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as p } from "../../rope-Ir4mTyD1.js";
+ import "../../index-Duu1Lvvv.js";
+ import { a as p } from "../../rope-DJ7Y7c-u.js";
  export {
  p as ropeGradConfig
  };
package/dist/ops/grads/softmax16.js CHANGED
@@ -1,4 +1,4 @@
- import { u as n, e as a } from "../../index-DOvlwCh-.js";
+ import { i as n, e as a } from "../../index-Duu1Lvvv.js";
  import { isPackedTensor as t } from "../../utilities/packed.js";
  function s(r, e) {
  return a().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
package/dist/ops/grads/unpack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../../index-DOvlwCh-.js";
- import { a as p } from "../../pack16-nQ6JaLo-.js";
+ import "../../index-Duu1Lvvv.js";
+ import { a as p } from "../../pack16-atD0eYRm.js";
  export {
  p as unpackGradConfig
  };
package/dist/ops/matMul16.js CHANGED
@@ -1,9 +1,9 @@
- import "../index-DOvlwCh-.js";
- import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-BWRSOCWB.js";
+ import "../index-Duu1Lvvv.js";
+ import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-xswmhSuF.js";
  import "./webgl/matMul16.js";
  import "./cpu/matMul16.js";
  import "../utilities/packed.js";
- import "../pack16-nQ6JaLo-.js";
+ import "../pack16-atD0eYRm.js";
  export {
  p as matMul16,
  u as matMul16Gelu,
package/dist/ops/matMulGelu.js CHANGED
@@ -1,6 +1,6 @@
- import { e as u } from "../index-DOvlwCh-.js";
+ import { e as u } from "../index-Duu1Lvvv.js";
  import "./cpu/matMulGelu.js";
- import "../matMulGelu-CzfgT6Wq.js";
+ import "../matMulGelu-BpvgnYG8.js";
  import "./grads/matMulGelu.js";
  function M(r, e) {
  return u().runKernel("MatMulGelu", { x: r, kernel: e });
package/dist/ops/matMulMul.js CHANGED
@@ -1,4 +1,4 @@
- import { e as u } from "../index-DOvlwCh-.js";
+ import { e as u } from "../index-Duu1Lvvv.js";
  import "./cpu/matMulMul.js";
  import "./webgl/matMulMul.js";
  function m(e, r, t, l = !1, n = !1) {
package/dist/ops/mul16.js CHANGED
@@ -1,4 +1,4 @@
- import { m as t, e as u } from "../index-DOvlwCh-.js";
+ import { m as t, e as u } from "../index-Duu1Lvvv.js";
  import { isPackedTensor as n } from "../utilities/packed.js";
  function i(r, e) {
  return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
package/dist/ops/mulDrop.js CHANGED
@@ -1,4 +1,4 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-Duu1Lvvv.js";
  import "./cpu/mulDropout.js";
  import "./webgl/mulDropout.js";
  function m(r, o, e, n) {
package/dist/ops/normRMS.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-DOvlwCh-.js";
+ import { e as n } from "../index-Duu1Lvvv.js";
  import "./cpu/normRMS.js";
  import "./webgl/normRMS.js";
  import "./grads/normRMS.js";
package/dist/ops/pack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../index-DOvlwCh-.js";
- import { p as a } from "../pack16-nQ6JaLo-.js";
+ import "../index-Duu1Lvvv.js";
+ import { p as a } from "../pack16-atD0eYRm.js";
  export {
  a as pack16
  };
package/dist/ops/qkv.js CHANGED
@@ -1,4 +1,4 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-Duu1Lvvv.js";
  import "./cpu/qkv.js";
  import "./webgl/qkv.js";
  import "./grads/qkv.js";
package/dist/ops/reshape16.js CHANGED
@@ -1,5 +1,5 @@
- import { u as p, p as s, e as u } from "../index-DOvlwCh-.js";
- import { r as c } from "../reshape-ByE68wS9.js";
+ import { i as p, f as s, e as u } from "../index-Duu1Lvvv.js";
+ import { r as c } from "../reshape-BI0yzp1T.js";
  const i = {
  kernelName: "Reshape16",
  inputsToSave: ["x"],
package/dist/ops/rope.js CHANGED
@@ -1,7 +1,7 @@
- import "../index-DOvlwCh-.js";
+ import "../index-Duu1Lvvv.js";
  import "./cpu/rope.js";
  import "./webgl/rope.js";
- import { r as i } from "../rope-Ir4mTyD1.js";
+ import { r as i } from "../rope-DJ7Y7c-u.js";
  export {
  i as rope
  };
package/dist/ops/scatterSub.js CHANGED
@@ -1,4 +1,4 @@
- import { e as i } from "../index-DOvlwCh-.js";
+ import { e as i } from "../index-Duu1Lvvv.js";
  import "./cpu/scatterSub.js";
  import "./webgl/scatterSub.js";
  function c(t, r, e) {
package/dist/ops/slice16.js CHANGED
@@ -1,6 +1,6 @@
  import { isPackedTensor as n } from "../utilities/packed.js";
- import { e as c } from "../index-DOvlwCh-.js";
- import { s as i } from "../slice-BfEGSH82.js";
+ import { e as c } from "../index-Duu1Lvvv.js";
+ import { s as i } from "../slice-wBNvzVyz.js";
  function a(r, e, o) {
  return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
  }
package/dist/ops/softmax16.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-DOvlwCh-.js";
+ import { e as n } from "../index-Duu1Lvvv.js";
  import "./grads/softmax16.js";
  import { isPackedTensor as e } from "../utilities/packed.js";
  function t(r) {
package/dist/ops/sub16.js CHANGED
@@ -1,4 +1,4 @@
- import { c as s, e as t } from "../index-DOvlwCh-.js";
+ import { c as s, e as t } from "../index-Duu1Lvvv.js";
  import { isPackedTensor as n } from "../utilities/packed.js";
  function c(r, e) {
  return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
package/dist/ops/sum16.js CHANGED
@@ -1,6 +1,6 @@
- import { e as t } from "../index-DOvlwCh-.js";
+ import { e as t } from "../index-Duu1Lvvv.js";
  import { isPackedTensor as s } from "../utilities/packed.js";
- import { s as n } from "../sum-DWAtNGez.js";
+ import { s as n } from "../sum-BPUfDB2X.js";
  function p(r, o, e = !1) {
  if (!s(r))
  return n(r, o, e);
package/dist/ops/transpose16.js CHANGED
@@ -1,20 +1,20 @@
- import { u as i, p, e as u } from "../index-DOvlwCh-.js";
+ import { i, f as p, e as u } from "../index-Duu1Lvvv.js";
  import { forcePacked as l, forceFloat as m } from "./grads/utils.js";
- import { g } from "../axis_util-BaG7mf5A.js";
- import { isPackedTensor as f } from "../utilities/packed.js";
- import { t as a } from "../transpose-ClWiBS_b.js";
+ import { g as f } from "../axis_util-DGqbT-FX.js";
+ import { isPackedTensor as g } from "../utilities/packed.js";
+ import { t as a } from "../transpose-BUkQCJp9.js";
  const d = {
  kernelName: "Transpose16",
  gradFunc: (r, s, t) => {
  if (Array.isArray(r))
  throw new Error("Transpose16 gradient does not support multiple outputs.");
- const n = t, { perm: e } = n, o = g(e);
+ const n = t, { perm: e } = n, o = f(e);
  return { x: () => T(r, o) };
  }
  };
  i(d);
  function c(r) {
- const { inputs: s, attrs: t } = r, { x: n } = s, { perm: e } = t, o = f(n);
+ const { inputs: s, attrs: t } = r, { x: n } = s, { perm: e } = t, o = g(n);
  if (o && e[e.length - 1] !== n.shape.length - 1)
  throw new Error("Transpose16 currently only supports the last axis being unchanged.");
  return o ? l(a(m(n), e)) : a(n, e);
package/dist/ops/unpack16.js CHANGED
@@ -1,5 +1,5 @@
- import "../index-DOvlwCh-.js";
- import { u as t } from "../pack16-nQ6JaLo-.js";
+ import "../index-Duu1Lvvv.js";
+ import { u as t } from "../pack16-atD0eYRm.js";
  import "../utilities/packed.js";
  export {
  t as unpack16
package/dist/ops/webgl/adamAdjust.js CHANGED
@@ -1,5 +1,5 @@
- import { r as n } from "../../Reshape-Zt6eb7yh.js";
- import { p as f } from "../../index-DOvlwCh-.js";
+ import { r as n } from "../../Reshape-Ck29jQSY.js";
+ import { f } from "../../index-Duu1Lvvv.js";
  class v {
  variableNames = ["moments", "value"];
  outputShape;
package/dist/ops/webgl/adamMoments.js CHANGED
@@ -1,4 +1,4 @@
- import { p as m } from "../../index-DOvlwCh-.js";
+ import { f as m } from "../../index-Duu1Lvvv.js";
  class i {
  variableNames = ["moments", "gradient"];
  outputShape;
package/dist/ops/webgl/appendCache.js CHANGED
@@ -1,4 +1,4 @@
- import { p } from "../../index-DOvlwCh-.js";
+ import { f as p } from "../../index-Duu1Lvvv.js";
  class m {
  variableNames = ["cache", "item"];
  outputShape;
package/dist/ops/webgl/attentionMask.js CHANGED
@@ -1,4 +1,4 @@
- import { p as m } from "../../index-DOvlwCh-.js";
+ import { f as m } from "../../index-Duu1Lvvv.js";
  class h {
  variableNames = ["q", "k"];
  outputShape;
package/dist/ops/webgl/fusedSoftmax.js CHANGED
@@ -1,7 +1,7 @@
- import { m as b, s as I, r as k } from "../../RealDiv-KAPDe8zB.js";
- import { r as v } from "../../Reshape-Zt6eb7yh.js";
- import { p as w, af as P } from "../../index-DOvlwCh-.js";
- import { e as S } from "../../axis_util-BaG7mf5A.js";
+ import { m as b, s as I, r as k } from "../../RealDiv-C8neBwFi.js";
+ import { r as v } from "../../Reshape-Ck29jQSY.js";
+ import { f as w, af as P } from "../../index-Duu1Lvvv.js";
+ import { e as S } from "../../axis_util-DGqbT-FX.js";
  class T {
  variableNames = ["logits", "maxLogits"];
  outputShape;
package/dist/ops/webgl/gatherSub.js CHANGED
@@ -1,4 +1,4 @@
- import { p as l } from "../../index-DOvlwCh-.js";
+ import { f as l } from "../../index-Duu1Lvvv.js";
  class u {
  variableNames = ["labels", "logits", "values"];
  outputShape;
package/dist/ops/webgl/gelu.js CHANGED
@@ -1,5 +1,5 @@
- import { p as a } from "../../index-DOvlwCh-.js";
- import { u as s, C as i } from "../../kernel_funcs_utils-CCzYdUZg.js";
+ import { f as a } from "../../index-Duu1Lvvv.js";
+ import { u as s, C as i } from "../../kernel_funcs_utils-BtYrPoJu.js";
  const t = 0.7978845608028654, r = 0.044715, c = i + `
  float x3 = x * x * x;
  float inner = x + ${r} * x3;
package/dist/ops/webgl/log.js CHANGED
@@ -1,6 +1,6 @@
- import { p as e, ao as r } from "../../index-DOvlwCh-.js";
- import { u as s, l as N } from "../../kernel_funcs_utils-CCzYdUZg.js";
- import { y as l } from "../../shared-DT1TkE6w.js";
+ import { f as e, ao as r } from "../../index-Duu1Lvvv.js";
+ import { u as s, l as N } from "../../kernel_funcs_utils-BtYrPoJu.js";
+ import { y as l } from "../../shared-Q3BS6T03.js";
  const a = N + `
  return x < 0.0 ? NAN : log(x);
  `, t = `
package/dist/ops/webgl/matMul16.js CHANGED
@@ -1,16 +1,16 @@
  import { isPackedTensor as k } from "../../utilities/packed.js";
- import { p as g, m as M, b as m } from "../../index-DOvlwCh-.js";
+ import { f as g, m as M, b as m } from "../../index-Duu1Lvvv.js";
  import { matMulMul as N } from "../matMulMul.js";
  import { matMulGelu as U } from "../matMulGelu.js";
- import { m as G } from "../../mat_mul-SjpJRLyL.js";
- import { r as w } from "../../reshape-ByE68wS9.js";
- import { t as h } from "../../transpose-ClWiBS_b.js";
- function P(p) {
- const { A: r, B: o } = p.inputs, { transposeA: l, transposeB: c, scale: u, activation: A, scaleA: d, scaleB: f, forceOutputShape: t, perm: n } = p.attrs, B = !k(r), v = !k(o);
+ import { m as G } from "../../mat_mul-Bn2BDpT4.js";
+ import { r as w } from "../../reshape-BI0yzp1T.js";
+ import { t as h } from "../../transpose-BUkQCJp9.js";
+ function P(l) {
+ const { A: r, B: o } = l.inputs, { transposeA: p, transposeB: c, scale: u, activation: A, scaleA: f, scaleB: d, forceOutputShape: t, perm: n } = l.attrs, B = !k(r), v = !k(o);
  if (B && v) {
- const a = d !== void 0 ? M(r, m(d)) : r, i = f !== void 0 ? M(o, m(f)) : o;
+ const a = f !== void 0 ? M(r, m(f)) : r, i = d !== void 0 ? M(o, m(d)) : o;
  let e;
- if (u !== void 0 ? e = N(a, i, m(u), l, c) : A === "gelu" ? e = U(a, i) : e = G(a, i, l, c), n)
+ if (u !== void 0 ? e = N(a, i, m(u), p, c) : A === "gelu" ? e = U(a, i) : e = G(a, i, p, c), n)
  if (t) {
  const s = w(e, t);
  e.dispose();
package/dist/ops/webgl/matMulGelu.js CHANGED
@@ -1,7 +1,7 @@
- import "../../index-DOvlwCh-.js";
- import "../../Reshape-Zt6eb7yh.js";
- import { a as m, b as o, c as p } from "../../matMulGelu-CzfgT6Wq.js";
- import "../../mat_mul-SjpJRLyL.js";
+ import "../../index-Duu1Lvvv.js";
+ import "../../Reshape-Ck29jQSY.js";
+ import { a as m, b as o, c as p } from "../../matMulGelu-BpvgnYG8.js";
+ import "../../mat_mul-Bn2BDpT4.js";
  export {
  m as MATMUL_SHARED_DIM_THRESHOLD,
  o as batchMatMulGeluImpl,
package/dist/ops/webgl/matMulMul.js CHANGED
@@ -1,9 +1,9 @@
- import { p as u } from "../../index-DOvlwCh-.js";
- import { b as c } from "../../matMulGelu-CzfgT6Wq.js";
- const p = `
+ import { f as u } from "../../index-Duu1Lvvv.js";
+ import { b as c } from "../../matMulGelu-BpvgnYG8.js";
+ const M = `
  return a * b;
  `;
- function M(r) {
+ function p(r) {
  const { inputs: n, backend: a, attrs: o } = r, { x: t, kernel: e, y: l } = n, { transposeA: s, transposeB: i } = o;
  if (t === void 0 || e === void 0)
  throw new Error("BatchMatMul requires two input tensors.");
@@ -13,16 +13,16 @@ function M(r) {
  transposeA: s,
  transposeB: i,
  backend: a,
- activationSnippet: p,
+ activationSnippet: M,
  multiplier: l
  });
  }
  const m = {
  kernelName: "MatMulMul",
  backendName: "webgl",
- kernelFunc: M
+ kernelFunc: p
  };
  u(m);
  export {
- M as batchMatMulKernel
+ p as batchMatMulKernel
  };
package/dist/ops/webgl/mulDropout.js CHANGED
@@ -1,4 +1,4 @@
- import { p as m } from "../../index-DOvlwCh-.js";
+ import { f as m } from "../../index-Duu1Lvvv.js";
  class f {
  variableNames = ["a", "b"];
  outputShape;
package/dist/ops/webgl/normRMS.js CHANGED
@@ -1,5 +1,5 @@
- import { p as g, e as G } from "../../index-DOvlwCh-.js";
- import { s as x } from "../../sum-DWAtNGez.js";
+ import { f as p, e as G } from "../../index-Duu1Lvvv.js";
+ import { s as x } from "../../sum-BPUfDB2X.js";
  class y {
  variableNames = ["x", "meanSquare", "gamma"];
  outputShape;
@@ -28,7 +28,7 @@ const C = {
  backendName: "webgl",
  kernelFunc: v
  };
- g(C);
+ p(C);
  class b {
  variableNames = ["x", "meanSquare", "dyGamma", "dyXMean"];
  outputShape;
@@ -73,14 +73,14 @@ function M(t) {
  l.dispose();
  const f = new b(n, m, u), S = r.runWebGLProgram(f, [e, d, s, i], "float32");
  s.dispose(), i.dispose();
- const h = new N(n, m, u), p = r.runWebGLProgram(h, [e, d, a], "float32");
+ const h = new N(n, m, u), g = r.runWebGLProgram(h, [e, d, a], "float32");
  d.dispose();
- const q = x(G().makeTensorFromTensorInfo(p), [0, 1]);
- return r.disposeIntermediateTensorInfo(p), [S, q];
+ const q = x(G().makeTensorFromTensorInfo(g), [0, 1]);
+ return r.disposeIntermediateTensorInfo(g), [S, q];
  }
  const k = {
  kernelName: "RMSNormGrad",
  backendName: "webgl",
  kernelFunc: M
  };
- g(k);
+ p(k);
package/dist/ops/webgl/qkv.js CHANGED
@@ -1,4 +1,4 @@
- import { p as i } from "../../index-DOvlwCh-.js";
+ import { f as i } from "../../index-Duu1Lvvv.js";
  class l {
  variableNames = ["x", "kernel"];
  outputShape;
package/dist/ops/webgl/rope.js CHANGED
@@ -1,4 +1,4 @@
- import { p as h } from "../../index-DOvlwCh-.js";
+ import { f as h } from "../../index-Duu1Lvvv.js";
  class g {
  variableNames = ["x", "sin", "cos"];
  outputShape;
package/dist/ops/webgl/scatterSub.js CHANGED
@@ -1,4 +1,4 @@
- import { p as i } from "../../index-DOvlwCh-.js";
+ import { f as i } from "../../index-Duu1Lvvv.js";
  class u {
  variableNames = ["labels", "softmaxProbs", "dy"];
  outputShape;
package/dist/ops/webgpu/adamAdjust.js CHANGED
@@ -1,6 +1,6 @@
- import { e as p } from "../../webgpu_program-DuOXPQol.js";
- import { f as d, c as l } from "../../webgpu_util-RxEF33Rj.js";
- import { p as f, ab as c } from "../../index-DOvlwCh-.js";
+ import { e as p } from "../../webgpu_program-BpWRlghH.js";
+ import { f as d, c as l } from "../../webgpu_util-DMiKzzQM.js";
+ import { f, a6 as c } from "../../index-Duu1Lvvv.js";
  class h {
  variableNames = ["moments", "value"];
  outputShape;
package/dist/ops/webgpu/adamMoments.js CHANGED
@@ -1,6 +1,6 @@
- import { e as u } from "../../webgpu_program-DuOXPQol.js";
- import { f as p, c as d } from "../../webgpu_util-RxEF33Rj.js";
- import { p as c, ab as f } from "../../index-DOvlwCh-.js";
+ import { e as u } from "../../webgpu_program-BpWRlghH.js";
+ import { f as p, c as d } from "../../webgpu_util-DMiKzzQM.js";
+ import { f, a6 as c } from "../../index-Duu1Lvvv.js";
  class l {
  variableNames = ["moments", "gradient"];
  outputShape;
@@ -42,7 +42,7 @@ function h(e) {
  const { moments: t, gradient: a } = e.inputs, { beta1: n, beta2: o, lossScaling: s } = e.attrs, r = e.backend;
  if (a.dtype !== "float32")
  throw new Error(`Gradient must be float32, but got ${a.dtype}`);
- if (f(t.shape, [...a.shape, 2], "Error in AdamMoments: "), n < 0 || n >= 1)
+ if (c(t.shape, [...a.shape, 2], "Error in AdamMoments: "), n < 0 || n >= 1)
  throw new Error(`Invalid beta1 value: ${n}. Must be in the range [0, 1).`);
  if (o < 0 || o >= 1)
  throw new Error(`Invalid beta2 value: ${o}. Must be in the range [0, 1).`);
@@ -58,4 +58,4 @@ const g = {
  backendName: "webgpu",
  kernelFunc: h
  };
- c(g);
+ f(g);
package/dist/ops/webgpu/add16.js CHANGED
@@ -1,4 +1,4 @@
- import { p as t } from "../../index-DOvlwCh-.js";
+ import { f as t } from "../../index-Duu1Lvvv.js";
  import { BinaryOpProgram as p } from "./utils/binary_op.js";
  import { B as s } from "../../binary_op_util-pKXltfxI.js";
  function c(e) {
package/dist/ops/webgpu/appendCache.js CHANGED
@@ -1,7 +1,7 @@
  import { isPackedTensor as T } from "../../utilities/packed.js";
- import { e as p } from "../../webgpu_program-DuOXPQol.js";
- import { f as d, c as u } from "../../webgpu_util-RxEF33Rj.js";
- import { p as S, ab as g } from "../../index-DOvlwCh-.js";
+ import { e as p } from "../../webgpu_program-BpWRlghH.js";
+ import { f as d, c as u } from "../../webgpu_util-DMiKzzQM.js";
+ import { f as S, a6 as g } from "../../index-Duu1Lvvv.js";
  class x {
  variableNames = ["cache", "item"];
  outputShape;
package/dist/ops/webgpu/attentionMask.js CHANGED
@@ -1,6 +1,6 @@
- import { p as d, ab as b } from "../../index-DOvlwCh-.js";
+ import { f as d, a6 as b } from "../../index-Duu1Lvvv.js";
  import { isPackedTensor as p } from "../../utilities/packed.js";
- import { b as l } from "../../matMul16-BWRSOCWB.js";
+ import { b as l } from "../../matMul16-xswmhSuF.js";
  import M from "./attentionMask32_program.js";
  function w(n) {
  const { q: t, k: e } = n.inputs, { divisor: a, pastLen: o } = n.attrs, m = n.backend;
package/dist/ops/webgpu/attentionMask32_program.js CHANGED
@@ -1,5 +1,5 @@
- import { e as r } from "../../webgpu_program-DuOXPQol.js";
- import { f as a, c as u } from "../../webgpu_util-RxEF33Rj.js";
+ import { e as r } from "../../webgpu_program-BpWRlghH.js";
+ import { f as a, c as u } from "../../webgpu_util-DMiKzzQM.js";
  class p {
  variableNames = ["q", "k"];
  outputShape;
package/dist/ops/webgpu/concat16.js CHANGED
@@ -1,8 +1,8 @@
- import { p as x, af as I, h as c } from "../../index-DOvlwCh-.js";
- import { e as D } from "../../webgpu_program-DuOXPQol.js";
- import { f as $, c as F } from "../../webgpu_util-RxEF33Rj.js";
- import { r as g } from "../../Reshape-BYkmUnAv.js";
- import { a as L, c as d } from "../../concat_util-DpW8mL_l.js";
+ import { f as x, af as I, U as c } from "../../index-Duu1Lvvv.js";
+ import { e as D } from "../../webgpu_program-BpWRlghH.js";
+ import { f as $, c as F } from "../../webgpu_util-DMiKzzQM.js";
+ import { r as g } from "../../Reshape-Bd4V_4X7.js";
+ import { a as L, c as d } from "../../concat_util-D0je5Ppu.js";
  class T {
  outputShape;
  shaderKey;