@genai-fi/nanogpt 0.10.3 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. package/dist/Generator.d.ts +10 -5
  2. package/dist/Generator.js +1789 -1765
  3. package/dist/{RealDiv-KAPDe8zB.js → RealDiv-C8neBwFi.js} +15 -15
  4. package/dist/{Reshape-BYkmUnAv.js → Reshape-Bd4V_4X7.js} +1 -1
  5. package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ck29jQSY.js} +5 -5
  6. package/dist/TeachableLLM.d.ts +5 -3
  7. package/dist/TeachableLLM.js +14 -14
  8. package/dist/Trainer.d.ts +3 -1
  9. package/dist/Trainer.js +11 -8
  10. package/dist/{axis_util-BaG7mf5A.js → axis_util-DGqbT-FX.js} +3 -3
  11. package/dist/backend.js +2 -2
  12. package/dist/{backend_util-RCe-rHaj.js → backend_util-DC3rBo_H.js} +18 -18
  13. package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-mbhNnlx9.js} +3 -3
  14. package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
  15. package/dist/checks/appendCache.js +2 -2
  16. package/dist/checks/attentionMask.js +3 -3
  17. package/dist/checks/gelu.js +2 -2
  18. package/dist/checks/matMulGelu.js +2 -2
  19. package/dist/checks/normRMS.js +4 -4
  20. package/dist/checks/normRMSGrad.js +3 -3
  21. package/dist/checks/packUnpack.js +2 -2
  22. package/dist/checks/qkv.js +4 -4
  23. package/dist/checks/rope.js +2 -2
  24. package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-fg2aKzUy.js} +5 -5
  25. package/dist/complex-Cyg-eQeZ.js +11 -0
  26. package/dist/concat-CSm2rMwe.js +17 -0
  27. package/dist/{concat_util-DpW8mL_l.js → concat_util-D0je5Ppu.js} +1 -1
  28. package/dist/{dataset-BcwmTGYc.js → dataset-CVIJu7Xa.js} +7 -7
  29. package/dist/{dropout-BcvN9JYi.js → dropout-DLhSMNTZ.js} +9 -9
  30. package/dist/expand_dims-ChkuOp6I.js +11 -0
  31. package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-1KWPiStI.js} +1 -1
  32. package/dist/{floor-D5QdR_le.js → floor-BRMPgeIs.js} +1 -1
  33. package/dist/{gather-D3JcZUaI.js → gather-BSULDalH.js} +1 -1
  34. package/dist/{gelu-CjNPL4OH.js → gelu-BK1k-n1i.js} +1 -1
  35. package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-BJSTk_mW.js} +25 -25
  36. package/dist/{index-BwexR4lA.js → index-BBVLAXZD.js} +89 -89
  37. package/dist/{index-DOvlwCh-.js → index-Duu1Lvvv.js} +53 -53
  38. package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-BtYrPoJu.js} +6 -6
  39. package/dist/layers/BaseLayer.js +2 -2
  40. package/dist/layers/CausalSelfAttention.js +6 -6
  41. package/dist/layers/MLP.js +4 -4
  42. package/dist/layers/PositionEmbedding.js +5 -5
  43. package/dist/layers/RMSNorm.js +3 -3
  44. package/dist/layers/RoPECache.js +4 -4
  45. package/dist/layers/TiedEmbedding.js +6 -6
  46. package/dist/layers/TransformerBlock.js +1 -1
  47. package/dist/loader/loadTransformers.js +1 -1
  48. package/dist/loader/oldZipLoad.js +9 -9
  49. package/dist/log_sum_exp-CVqLsVLl.js +39 -0
  50. package/dist/main.d.ts +10 -1
  51. package/dist/main.js +68 -58
  52. package/dist/{matMul16-BWRSOCWB.js → matMul16-xswmhSuF.js} +3 -3
  53. package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-BpvgnYG8.js} +14 -14
  54. package/dist/mat_mul-Bn2BDpT4.js +11 -0
  55. package/dist/{mod-AnXEvvpo.js → mod-B4AUd1Np.js} +1 -1
  56. package/dist/models/NanoGPTV1.js +2 -2
  57. package/dist/models/model.js +9 -9
  58. package/dist/{ones-D2rT0xk2.js → ones-CBI1AQjb.js} +3 -3
  59. package/dist/ops/adamAdjust.js +1 -1
  60. package/dist/ops/adamMoments.js +1 -1
  61. package/dist/ops/add16.js +1 -1
  62. package/dist/ops/appendCache.js +3 -3
  63. package/dist/ops/attentionMask.js +1 -1
  64. package/dist/ops/concat16.js +2 -2
  65. package/dist/ops/cpu/adamAdjust.js +9 -9
  66. package/dist/ops/cpu/adamMoments.js +5 -5
  67. package/dist/ops/cpu/appendCache.js +6 -6
  68. package/dist/ops/cpu/attentionMask.js +10 -10
  69. package/dist/ops/cpu/fusedSoftmax.js +5 -5
  70. package/dist/ops/cpu/gatherSub.js +9 -9
  71. package/dist/ops/cpu/gelu.js +5 -5
  72. package/dist/ops/cpu/matMul16.js +2 -2
  73. package/dist/ops/cpu/matMulGelu.js +3 -3
  74. package/dist/ops/cpu/matMulMul.js +5 -5
  75. package/dist/ops/cpu/mulDropout.js +1 -1
  76. package/dist/ops/cpu/normRMS.js +7 -7
  77. package/dist/ops/cpu/qkv.js +3 -3
  78. package/dist/ops/cpu/rope.js +5 -5
  79. package/dist/ops/cpu/scatterSub.js +11 -11
  80. package/dist/ops/dot16.js +2 -2
  81. package/dist/ops/gatherSub.js +1 -1
  82. package/dist/ops/gelu.js +2 -2
  83. package/dist/ops/grads/add16.js +4 -4
  84. package/dist/ops/grads/attentionMask.js +2 -2
  85. package/dist/ops/grads/gelu.js +2 -2
  86. package/dist/ops/grads/matMul16.js +3 -3
  87. package/dist/ops/grads/matMulGelu.js +6 -6
  88. package/dist/ops/grads/normRMS.js +4 -4
  89. package/dist/ops/grads/pack16.js +3 -3
  90. package/dist/ops/grads/qkv.js +10 -10
  91. package/dist/ops/grads/rope.js +2 -2
  92. package/dist/ops/grads/softmax16.js +1 -1
  93. package/dist/ops/grads/unpack16.js +2 -2
  94. package/dist/ops/matMul16.js +3 -3
  95. package/dist/ops/matMulGelu.js +2 -2
  96. package/dist/ops/matMulMul.js +1 -1
  97. package/dist/ops/mul16.js +1 -1
  98. package/dist/ops/mulDrop.js +1 -1
  99. package/dist/ops/normRMS.js +1 -1
  100. package/dist/ops/pack16.js +2 -2
  101. package/dist/ops/qkv.js +1 -1
  102. package/dist/ops/reshape16.js +2 -2
  103. package/dist/ops/rope.js +2 -2
  104. package/dist/ops/scatterSub.js +1 -1
  105. package/dist/ops/slice16.js +2 -2
  106. package/dist/ops/softmax16.js +1 -1
  107. package/dist/ops/sub16.js +1 -1
  108. package/dist/ops/sum16.js +2 -2
  109. package/dist/ops/transpose16.js +6 -6
  110. package/dist/ops/unpack16.js +2 -2
  111. package/dist/ops/webgl/adamAdjust.js +2 -2
  112. package/dist/ops/webgl/adamMoments.js +1 -1
  113. package/dist/ops/webgl/appendCache.js +1 -1
  114. package/dist/ops/webgl/attentionMask.js +1 -1
  115. package/dist/ops/webgl/fusedSoftmax.js +4 -4
  116. package/dist/ops/webgl/gatherSub.js +1 -1
  117. package/dist/ops/webgl/gelu.js +2 -2
  118. package/dist/ops/webgl/log.js +3 -3
  119. package/dist/ops/webgl/matMul16.js +8 -8
  120. package/dist/ops/webgl/matMulGelu.js +4 -4
  121. package/dist/ops/webgl/matMulMul.js +7 -7
  122. package/dist/ops/webgl/mulDropout.js +1 -1
  123. package/dist/ops/webgl/normRMS.js +7 -7
  124. package/dist/ops/webgl/qkv.js +1 -1
  125. package/dist/ops/webgl/rope.js +1 -1
  126. package/dist/ops/webgl/scatterSub.js +1 -1
  127. package/dist/ops/webgpu/adamAdjust.js +3 -3
  128. package/dist/ops/webgpu/adamMoments.js +5 -5
  129. package/dist/ops/webgpu/add16.js +1 -1
  130. package/dist/ops/webgpu/appendCache.js +3 -3
  131. package/dist/ops/webgpu/attentionMask.js +2 -2
  132. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  133. package/dist/ops/webgpu/concat16.js +5 -5
  134. package/dist/ops/webgpu/gatherSub.js +5 -5
  135. package/dist/ops/webgpu/gelu.js +3 -3
  136. package/dist/ops/webgpu/matMul16.js +19 -19
  137. package/dist/ops/webgpu/matMul16_program.js +2 -2
  138. package/dist/ops/webgpu/mul16.js +4 -4
  139. package/dist/ops/webgpu/normRMS.js +6 -6
  140. package/dist/ops/webgpu/normRMSGrad.js +4 -4
  141. package/dist/ops/webgpu/pack16.js +3 -3
  142. package/dist/ops/webgpu/pack16_program.js +2 -2
  143. package/dist/ops/webgpu/qkv.js +8 -8
  144. package/dist/ops/webgpu/rope.js +3 -3
  145. package/dist/ops/webgpu/scatterSub.js +3 -3
  146. package/dist/ops/webgpu/slice16.js +4 -4
  147. package/dist/ops/webgpu/softmax16.js +4 -4
  148. package/dist/ops/webgpu/softmax16_program.js +2 -2
  149. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  150. package/dist/ops/webgpu/softmax16grad.js +1 -1
  151. package/dist/ops/webgpu/sub16.js +4 -4
  152. package/dist/ops/webgpu/sum16.js +5 -5
  153. package/dist/ops/webgpu/transpose16.js +2 -2
  154. package/dist/ops/webgpu/transpose16_program.js +2 -2
  155. package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
  156. package/dist/ops/webgpu/unpack16.js +5 -5
  157. package/dist/ops/webgpu/utils/binary_op.js +3 -3
  158. package/dist/ops/webgpu/utils/reductions.js +4 -4
  159. package/dist/{ops-B5yanEdW.js → ops-C2_OXuZ4.js} +69 -69
  160. package/dist/{pack16-nQ6JaLo-.js → pack16-atD0eYRm.js} +9 -9
  161. package/dist/patches/webgpu_backend.js +6 -6
  162. package/dist/patches/webgpu_base.js +1 -1
  163. package/dist/patches/webgpu_program.js +8 -8
  164. package/dist/{random_width-or-CEftb.js → random_width-BN4wGJaW.js} +33 -33
  165. package/dist/range-DKmP1-OQ.js +10 -0
  166. package/dist/relu-BsXmGzzu.js +9 -0
  167. package/dist/{reshape-ByE68wS9.js → reshape-BI0yzp1T.js} +1 -1
  168. package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-BA_BX-ub.js} +26 -26
  169. package/dist/{rope-Ir4mTyD1.js → rope-DJ7Y7c-u.js} +1 -1
  170. package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-k9MUVUkn.js} +1 -1
  171. package/dist/{selu_util-kbhpTdYD.js → selu_util-DyW0X1WG.js} +5 -5
  172. package/dist/{shared-DT1TkE6w.js → shared-Q3BS6T03.js} +1 -1
  173. package/dist/{shared-dntlHIDQ.js → shared-nnSWpC3u.js} +86 -86
  174. package/dist/{slice-BfEGSH82.js → slice-wBNvzVyz.js} +1 -1
  175. package/dist/{slice_util-uTKwiEpW.js → slice_util-zN8KFC5I.js} +1 -1
  176. package/dist/{softmax-CA5jFsLR.js → softmax-DfuYyjMh.js} +1 -1
  177. package/dist/split-BYrLboMq.js +9 -0
  178. package/dist/squeeze-Bk8Brcct.js +10 -0
  179. package/dist/{stack-Cf4n9h0N.js → stack-CDWShFHF.js} +1 -1
  180. package/dist/{step-CINUs5QB.js → step-BS5JXRR6.js} +23 -23
  181. package/dist/{sum-DWAtNGez.js → sum-BPUfDB2X.js} +3 -3
  182. package/dist/tensor-CEt9Nm2s.js +8 -0
  183. package/dist/tensor1d-Cc_KCIDg.js +11 -0
  184. package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN97fF71.js} +3 -3
  185. package/dist/{tensor4d-BARPdTaS.js → tensor4d-vuDDgdUI.js} +1 -1
  186. package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-806hyYve.js} +49 -49
  187. package/dist/{tile-mbfagpsB.js → tile-OWUvpIVt.js} +3 -3
  188. package/dist/tokeniser/BaseTokeniser.d.ts +25 -0
  189. package/dist/tokeniser/BaseTokeniser.js +94 -0
  190. package/dist/tokeniser/CharTokeniser.d.ts +10 -9
  191. package/dist/tokeniser/CharTokeniser.js +44 -30
  192. package/dist/tokeniser/bpe.d.ts +10 -9
  193. package/dist/tokeniser/bpe.js +67 -52
  194. package/dist/tokeniser/type.d.ts +14 -5
  195. package/dist/training/Adam.js +2 -2
  196. package/dist/training/AdamExt.js +1 -1
  197. package/dist/training/DatasetBuilder.d.ts +3 -3
  198. package/dist/training/DatasetBuilder.js +34 -38
  199. package/dist/training/FullTrainer.js +1 -1
  200. package/dist/training/Trainer.d.ts +4 -3
  201. package/dist/training/Trainer.js +22 -25
  202. package/dist/training/sparseCrossEntropy.js +3 -3
  203. package/dist/training/tasks/ConversationTask.d.ts +11 -0
  204. package/dist/training/tasks/ConversationTask.js +26 -0
  205. package/dist/training/tasks/PretrainingTask.d.ts +11 -0
  206. package/dist/training/tasks/PretrainingTask.js +34 -0
  207. package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
  208. package/dist/training/tasks/StartSentenceTask.js +42 -0
  209. package/dist/training/tasks/Task.d.ts +8 -0
  210. package/dist/training/tasks/Task.js +41 -0
  211. package/dist/{transpose-ClWiBS_b.js → transpose-BUkQCJp9.js} +6 -6
  212. package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-BljxHhCY.js} +5 -5
  213. package/dist/utilities/dummy.js +3 -3
  214. package/dist/utilities/multinomialCPU.js +2 -2
  215. package/dist/utilities/packed.js +1 -1
  216. package/dist/utilities/performance.js +1 -1
  217. package/dist/utilities/profile.js +1 -1
  218. package/dist/utilities/safetensors.js +2 -2
  219. package/dist/utilities/sentences.d.ts +1 -1
  220. package/dist/utilities/sentences.js +11 -11
  221. package/dist/utilities/weights.js +2 -2
  222. package/dist/{variable-WawDEaAb.js → variable-DPt_Iuog.js} +1 -1
  223. package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-BpWRlghH.js} +3 -3
  224. package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-DMiKzzQM.js} +7 -7
  225. package/dist/{zeros-KnWaWf-X.js → zeros-5YROwwUH.js} +2 -2
  226. package/dist/{zeros_like-DvE73F4e.js → zeros_like-De4n1C3m.js} +71 -71
  227. package/package.json +1 -1
  228. package/dist/complex-DjxcVmoX.js +0 -11
  229. package/dist/concat-BV8bt5H-.js +0 -17
  230. package/dist/expand_dims-DT4tEPwA.js +0 -11
  231. package/dist/log_sum_exp-ngO0-4pK.js +0 -39
  232. package/dist/mat_mul-SjpJRLyL.js +0 -11
  233. package/dist/range-BklejeeW.js +0 -10
  234. package/dist/relu-CP0ZcxWO.js +0 -9
  235. package/dist/split-CVLc0w--.js +0 -9
  236. package/dist/squeeze-C7Z2srUo.js +0 -10
  237. package/dist/tensor-DJoc7gJU.js +0 -8
  238. package/dist/tensor1d-D11P_7Dp.js +0 -11
package/dist/ops/webgpu/gatherSub.js
@@ -1,6 +1,6 @@
- import { e as u } from "../../webgpu_program-DuOXPQol.js";
- import { f as h, c as p } from "../../webgpu_util-RxEF33Rj.js";
- import { p as c, ab as r } from "../../index-DOvlwCh-.js";
+ import { e as u } from "../../webgpu_program-BpWRlghH.js";
+ import { f as h, c as p } from "../../webgpu_util-DMiKzzQM.js";
+ import { f as c, a6 as r } from "../../index-Duu1Lvvv.js";
  class l {
  variableNames = ["labels", "logits", "values"];
  outputShape;
@@ -31,9 +31,9 @@ function d(e) {
  const n = new l(i);
  return o.runWebGPUProgram(n, [a, t, s], "float32");
  }
- const b = {
+ const f = {
  kernelName: "EfficientGatherSub",
  backendName: "webgpu",
  kernelFunc: d
  };
- c(b);
+ c(f);
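Each kernel hunk in this diff ends with the same registration idiom: a config object carrying kernelName, backendName: "webgpu", and kernelFunc is passed to a minified function re-exported from the bundled index chunk. This matches TensorFlow.js custom-kernel registration; a minimal de-minified sketch, assuming the bundle wraps @tensorflow/tfjs-core's registerKernel (the function name and stub body are illustrative):

```ts
import { registerKernel, KernelConfig, TensorInfo } from "@tensorflow/tfjs-core";

// Illustrative stub: the real kernelFunc (d in the hunk) builds a WebGPU
// program and executes it with backend.runWebGPUProgram(...).
function gatherSubKernel(args: {
  inputs: Record<string, TensorInfo>;
  backend: object;
  attrs?: Record<string, unknown>;
}): TensorInfo {
  throw new Error("sketch only");
}

const config: KernelConfig = {
  kernelName: "EfficientGatherSub", // custom op name, as in the hunk
  backendName: "webgpu",
  kernelFunc: gatherSubKernel as unknown as KernelConfig["kernelFunc"],
};

registerKernel(config); // the minified c(f) call above
```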
package/dist/ops/webgpu/gelu.js
@@ -1,6 +1,6 @@
- import { p as d } from "../../index-DOvlwCh-.js";
- import { e as s } from "../../webgpu_program-DuOXPQol.js";
- import { f as n, c as o } from "../../webgpu_util-RxEF33Rj.js";
+ import { f as d } from "../../index-Duu1Lvvv.js";
+ import { e as s } from "../../webgpu_program-BpWRlghH.js";
+ import { f as n, c as o } from "../../webgpu_util-DMiKzzQM.js";
  import { isPackedTensor as l } from "../../utilities/packed.js";
  const u = 0.7978845608028654, r = 0.044715;
  class c {
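The two constants in this hunk are the coefficients of the standard tanh GELU approximation: u = √(2/π) ≈ 0.7978845608028654 and r = 0.044715. A plain restatement of the formula these constants belong to (the function name is illustrative):

```ts
// gelu(x) ≈ 0.5 · x · (1 + tanh(√(2/π) · (x + 0.044715 · x³)))
const SQRT_2_OVER_PI = 0.7978845608028654; // u in the hunk
const CUBIC_COEFF = 0.044715; // r in the hunk

function geluApprox(x: number): number {
  return 0.5 * x * (1 + Math.tanh(SQRT_2_OVER_PI * (x + CUBIC_COEFF * x ** 3)));
}
```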
package/dist/ops/webgpu/matMul16.js
@@ -1,25 +1,25 @@
- import { p as Q, m as P, b as B, h as y, a3 as V } from "../../index-DOvlwCh-.js";
- import { isPackedTensor as R } from "../../utilities/packed.js";
- import { reshape16 as U } from "../reshape16.js";
- import { matMulMul as X } from "../matMulMul.js";
- import { matMulGelu as Y } from "../matMulGelu.js";
- import Z from "./matMul16_program.js";
- import { m as _ } from "../../mat_mul-SjpJRLyL.js";
- import { r as x } from "../../reshape-ByE68wS9.js";
- import { t as C } from "../../transpose-ClWiBS_b.js";
+ import { f as J, m as P, b as B, U, _ as Q } from "../../index-Duu1Lvvv.js";
+ import { isPackedTensor as y } from "../../utilities/packed.js";
+ import { reshape16 as R } from "../reshape16.js";
+ import { matMulMul as V } from "../matMulMul.js";
+ import { matMulGelu as X } from "../matMulGelu.js";
+ import Y from "./matMul16_program.js";
+ import { m as Z } from "../../mat_mul-Bn2BDpT4.js";
+ import { r as x } from "../../reshape-BI0yzp1T.js";
+ import { t as C } from "../../transpose-BUkQCJp9.js";
  function $(p) {
- const { A: e, B: s } = p.inputs, { transposeA: d, transposeB: f, scale: i, activation: k, scaleA: c, scaleB: u, forceOutputShape: o, perm: h, causalMask: g, pastLen: E } = p.attrs, F = p.backend, S = !R(e), M = !R(s);
+ const { A: e, B: s } = p.inputs, { transposeA: d, transposeB: f, scale: i, activation: k, scaleA: c, scaleB: u, forceOutputShape: o, perm: h, causalMask: g, pastLen: E } = p.attrs, F = p.backend, S = !y(e), M = !y(s);
  if (S && M) {
  const A = c !== void 0 ? P(e, B(c)) : e, b = u !== void 0 ? P(s, B(u)) : s;
  if (g)
  throw new Error("Causal mask is not supported for unpacked MatMul16.");
  let a;
- if (i !== void 0 ? a = X(A, b, B(i), d, f) : k === "gelu" ? a = Y(A, b) : a = _(A, b, d, f), h)
+ if (i !== void 0 ? a = V(A, b, B(i), d, f) : k === "gelu" ? a = X(A, b) : a = Z(A, b, d, f), h)
  if (o) {
  const r = x(a, o);
  a.dispose();
- const J = C(r, h);
- return r.dispose(), J;
+ const H = C(r, h);
+ return r.dispose(), H;
  } else {
  const r = C(a, h);
  return a.dispose(), r;
@@ -34,23 +34,23 @@ function $(p) {
  throw new Error("When using mixed precision, A must be packed if B is packed.");
  if (!S && M)
  throw new Error("When using mixed precision, B must be packed if A is packed.");
- const l = e.shape.length, m = s.shape.length, W = e.shape.slice(0, -2), z = s.shape.slice(0, -2), v = y(W), w = y(z), I = V(e.shape.slice(0, -2), s.shape.slice(0, -2)), N = Math.max(v, w), K = e.shape[l - 2], L = s.shape[m - 2], T = e.shape[l - 1] * 2, j = s.shape[m - 1] * 2, D = U(e, [v, e.shape[l - 2], e.shape[l - 1]]), G = U(s, [w, s.shape[m - 2], s.shape[m - 1]]), t = new Z(N, K, L, T, j, d, f), n = [];
+ const l = e.shape.length, m = s.shape.length, W = e.shape.slice(0, -2), z = s.shape.slice(0, -2), v = U(W), w = U(z), I = Q(e.shape.slice(0, -2), s.shape.slice(0, -2)), N = Math.max(v, w), K = e.shape[l - 2], L = s.shape[m - 2], T = e.shape[l - 1] * 2, _ = s.shape[m - 1] * 2, D = R(e, [v, e.shape[l - 2], e.shape[l - 1]]), G = R(s, [w, s.shape[m - 2], s.shape[m - 1]]), t = new Y(N, K, L, T, _, d, f), n = [];
  i !== void 0 && (t.useScale(), n.push({ type: "float32", data: [i] })), c !== void 0 && (t.useScaleA(), n.push({ type: "float32", data: [c] })), u !== void 0 && (t.useScaleB(), n.push({ type: "float32", data: [u] })), k !== void 0 && t.useActivation(k), g && (t.useCausalMask(), n.push({ type: "int32", data: [E || 0] }));
  const O = t.outputShape.length;
  o && (p.attrs.originalShape = t.outputShape);
- const q = o ?? I.concat([t.outputShape[O - 2], t.outputShape[O - 1]]);
- t.setOutputShape(q, h);
- const H = F.runWebGPUProgram(
+ const j = o ?? I.concat([t.outputShape[O - 2], t.outputShape[O - 1]]);
+ t.setOutputShape(j, h);
+ const q = F.runWebGPUProgram(
  t,
  [D, G],
  "packedF16",
  n.length > 0 ? n : void 0
  );
- return D.dispose(), G.dispose(), H;
+ return D.dispose(), G.dispose(), q;
  }
  const ee = {
  kernelName: "MatMul16",
  backendName: "webgpu",
  kernelFunc: $
  };
- Q(ee);
+ J(ee);
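In the packed branch above, both operands are flattened to rank 3, batch dimensions are broadcast, and the true inner dimensions are recovered by doubling the stored last axis (T = shape[rank - 1] * 2), since each packed element holds two f16 values. A small restatement of that shape bookkeeping, with illustrative names (the shipped code is minified):

```ts
// Mirrors v/w/N/K/L/T/_ in the hunk; assumes 2 f16 values per stored element.
function packedMatMulShapes(aShape: number[], bShape: number[]) {
  const aBatch = aShape.slice(0, -2).reduce((x, y) => x * y, 1);
  const bBatch = bShape.slice(0, -2).reduce((x, y) => x * y, 1);
  return {
    batch: Math.max(aBatch, bBatch), // broadcast batch count (N)
    m: aShape[aShape.length - 2],    // rows of A (K)
    n: bShape[bShape.length - 2],    // rows of B (L)
    kA: aShape[aShape.length - 1] * 2, // unpacked inner size of A (T)
    kB: bShape[bShape.length - 1] * 2, // unpacked inner size of B (_)
  };
}

// e.g. packed A [2, 4, 64, 32] × packed B [1, 64, 32]:
console.log(packedMatMulShapes([2, 4, 64, 32], [1, 64, 32]));
// { batch: 8, m: 64, n: 64, kA: 64, kB: 64 }
```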
package/dist/ops/webgpu/matMul16_program.js
@@ -1,5 +1,5 @@
- import { h as f } from "../../index-DOvlwCh-.js";
- import { e as h } from "../../webgpu_program-DuOXPQol.js";
+ import { U as f } from "../../index-Duu1Lvvv.js";
+ import { e as h } from "../../webgpu_program-BpWRlghH.js";
  class B {
  variableNames = ["A", "B"];
  outputShape;
package/dist/ops/webgpu/mul16.js
@@ -1,8 +1,8 @@
- import { p as t } from "../../index-DOvlwCh-.js";
- import { BinaryOpProgram as p } from "./utils/binary_op.js";
- import { B as m } from "../../binary_op_util-pKXltfxI.js";
+ import { f as t } from "../../index-Duu1Lvvv.js";
+ import { BinaryOpProgram as m } from "./utils/binary_op.js";
+ import { B as p } from "../../binary_op_util-pKXltfxI.js";
  function s(e) {
- const { a: r, b: n } = e.inputs, o = e.backend, a = new p(m.MUL, r.shape, n.shape);
+ const { a: r, b: n } = e.inputs, o = e.backend, a = new m(p.MUL, r.shape, n.shape);
  return o.runWebGPUProgram(a, [r, n], "packedF16");
  }
  const c = {
package/dist/ops/webgpu/normRMS.js
@@ -1,12 +1,12 @@
- import { p as g, ab as l } from "../../index-DOvlwCh-.js";
+ import { f as g, a6 as l } from "../../index-Duu1Lvvv.js";
  import { createReduceInfo as w, reduce as S } from "./utils/reductions.js";
  import { isPackedTensor as d } from "../../utilities/packed.js";
- import { p as f } from "../../pack16-nQ6JaLo-.js";
- import b from "./normRMS16_program.js";
- import z from "./normRMS32_program.js";
- import N from "./utils/deviceInfo.js";
+ import { p as f } from "../../pack16-atD0eYRm.js";
+ import z from "./normRMS16_program.js";
+ import N from "./normRMS32_program.js";
+ import b from "./utils/deviceInfo.js";
  function P(c) {
- const { x: e, gamma: s } = c.inputs, m = c.backend, i = N(m), t = d(e), a = d(s), n = t || a, r = !n || t ? e : f(e), p = !n || a ? s : f(s), h = [r, p], o = w(h, -1), u = n ? new b(i, o) : new z(i, o);
+ const { x: e, gamma: s } = c.inputs, m = c.backend, i = b(m), t = d(e), a = d(s), n = t || a, r = !n || t ? e : f(e), p = !n || a ? s : f(s), h = [r, p], o = w(h, -1), u = n ? new z(i, o) : new N(i, o);
  if (l(p.shape, [r.shape[r.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
  throw new Error(`rmsNormGPU: input rank ${e.shape.length} not supported, only rank 3 is supported`);
  if (o.inSize !== r.shape[r.shape.length - 1])
package/dist/ops/webgpu/normRMSGrad.js
@@ -1,8 +1,8 @@
- import { p as _, ab as y, e as D } from "../../index-DOvlwCh-.js";
+ import { f as _, a6 as y, e as D } from "../../index-Duu1Lvvv.js";
  import { createReduceInfo as X } from "./utils/reductions.js";
- import { f as $ } from "../../webgpu_util-RxEF33Rj.js";
- import { e as M } from "../../webgpu_program-DuOXPQol.js";
- import { p as k, u as R } from "../../pack16-nQ6JaLo-.js";
+ import { f as $ } from "../../webgpu_util-DMiKzzQM.js";
+ import { e as M } from "../../webgpu_program-BpWRlghH.js";
+ import { p as k, u as R } from "../../pack16-atD0eYRm.js";
  import { isPackedTensor as h } from "../../utilities/packed.js";
  import { reshape16 as L } from "../reshape16.js";
  import { sum16 as P } from "../sum16.js";
package/dist/ops/webgpu/pack16.js
@@ -1,4 +1,4 @@
- import { p as i } from "../../index-DOvlwCh-.js";
+ import { f as i } from "../../index-Duu1Lvvv.js";
  import p from "./pack16_program.js";
  function m(e) {
  const { x: n } = e.inputs, { scaling: a, padding: r } = e.attrs, s = e.backend;
@@ -10,9 +10,9 @@ function m(e) {
  const c = [{ type: "float32", data: [a] }];
  return s.runWebGPUProgram(t, [n], "packedF16", o ? c : void 0);
  }
- const k = {
+ const f = {
  kernelName: "Pack16",
  backendName: "webgpu",
  kernelFunc: m
  };
- i(k);
+ i(f);
package/dist/ops/webgpu/pack16_program.js
@@ -1,5 +1,5 @@
- import { f as o, c as a } from "../../webgpu_util-RxEF33Rj.js";
- import { e as s } from "../../webgpu_program-DuOXPQol.js";
+ import { f as o, c as a } from "../../webgpu_util-DMiKzzQM.js";
+ import { e as s } from "../../webgpu_program-BpWRlghH.js";
  class h {
  outputShape;
  shaderKey = "Pack16";
package/dist/ops/webgpu/qkv.js
@@ -1,12 +1,12 @@
- import { p as h, ab as l } from "../../index-DOvlwCh-.js";
- import { b as f } from "../../matMul16-BWRSOCWB.js";
+ import { f as h, a6 as f } from "../../index-Duu1Lvvv.js";
+ import { b as l } from "../../matMul16-xswmhSuF.js";
  import { slice16 as a } from "../slice16.js";
  import { isPackedTensor as u } from "../../utilities/packed.js";
- function b(i) {
+ function k(i) {
  const { x: n, kernel: c } = i.inputs, { heads: e } = i.attrs, r = n.shape[0], t = n.shape[1], s = n.shape[2], p = u(n);
- if (l(c.shape, [p ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
+ if (f(c.shape, [p ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
  throw new Error(`Channel dimension ${s} must be divisible by number of heads ${e} in QKV.`);
- const o = f(n, c, !1, !1, {
+ const o = l(n, c, !1, !1, {
  forceOutputShape: [r, t, 3 * e, s / e],
  perm: [0, 2, 1, 3]
  }), m = [
@@ -16,9 +16,9 @@ function b(i) {
  ];
  return o.dispose(), m;
  }
- const k = {
+ const b = {
  kernelName: "QKV",
  backendName: "webgpu",
- kernelFunc: b
+ kernelFunc: k
  };
- h(k);
+ h(b);
package/dist/ops/webgpu/rope.js
@@ -1,7 +1,7 @@
  import { isPackedTensor as y } from "../../utilities/packed.js";
- import { e as c } from "../../webgpu_program-DuOXPQol.js";
- import { f as x, c as l } from "../../webgpu_util-RxEF33Rj.js";
- import { p as w, ab as b } from "../../index-DOvlwCh-.js";
+ import { e as c } from "../../webgpu_program-BpWRlghH.js";
+ import { f as x, c as l } from "../../webgpu_util-DMiKzzQM.js";
+ import { f as w, a6 as b } from "../../index-Duu1Lvvv.js";
  class v {
  variableNames = ["x", "sin", "cos"];
  outputShape;
package/dist/ops/webgpu/scatterSub.js
@@ -1,6 +1,6 @@
- import { e as p } from "../../webgpu_program-DuOXPQol.js";
- import { f as u, c as d } from "../../webgpu_util-RxEF33Rj.js";
- import { p as h, ab as o } from "../../index-DOvlwCh-.js";
+ import { e as p } from "../../webgpu_program-BpWRlghH.js";
+ import { f as u, c as d } from "../../webgpu_util-DMiKzzQM.js";
+ import { f as h, a6 as o } from "../../index-Duu1Lvvv.js";
  class b {
  variableNames = ["labels", "softmaxProbs", "dy"];
  outputShape;
package/dist/ops/webgpu/slice16.js
@@ -1,7 +1,7 @@
- import { b as u, c as m, e as l } from "../../webgpu_program-DuOXPQol.js";
- import { f, c as g } from "../../webgpu_util-RxEF33Rj.js";
- import { p as S, h as k } from "../../index-DOvlwCh-.js";
- import { p as y, a as $ } from "../../slice_util-uTKwiEpW.js";
+ import { b as u, c as m, e as l } from "../../webgpu_program-BpWRlghH.js";
+ import { f, c as g } from "../../webgpu_util-DMiKzzQM.js";
+ import { f as S, U as k } from "../../index-Duu1Lvvv.js";
+ import { p as y, a as $ } from "../../slice_util-zN8KFC5I.js";
  function x(o) {
  switch (o) {
  case 1:
package/dist/ops/webgpu/softmax16.js
@@ -1,12 +1,12 @@
- import { p as h, h as S, e as b } from "../../index-DOvlwCh-.js";
+ import { f as S, U as h, e as b } from "../../index-Duu1Lvvv.js";
  import { reshape16 as d } from "../reshape16.js";
  import x from "./softmax16_program.js";
  import k from "./softmax16_subgroup_program.js";
  import l from "./utils/deviceInfo.js";
- import { r as z } from "../../reshape-ByE68wS9.js";
+ import { r as z } from "../../reshape-BI0yzp1T.js";
  function F(a) {
  const { inputs: t, backend: o, attrs: p } = a, { logits: e } = t, { dim: r } = p, m = o.subgroupMinSize, i = o.subgroupMaxSize, c = l(o).subgroupsSupported, s = z(e, [
- S(e.shape) / e.shape[r],
+ h(e.shape) / e.shape[r],
  e.shape[r]
  ]), u = c ? new k(s.shape, m, i) : new x(s.shape), f = o.runWebGPUProgram(u, [s], "packedF16");
  s.dispose();
@@ -18,4 +18,4 @@ const I = {
  backendName: "webgpu",
  kernelFunc: F
  };
- h(I);
+ S(I);
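The softmax16 kernel flattens its input to 2-D before dispatch: every row becomes one softmax instance of length shape[dim], and a subgroup-based program is chosen when the device supports it. The reshape arithmetic, restated with an illustrative helper name:

```ts
// Mirrors z(e, [S(e.shape) / e.shape[r], e.shape[r]]) in the hunk.
function softmaxRows(shape: number[], dim: number): [number, number] {
  const size = shape.reduce((a, b) => a * b, 1); // total element count
  return [size / shape[dim], shape[dim]]; // [rows, softmax length]
}

// e.g. logits [2, 8, 512] with dim = 2 → 16 independent rows of 512.
console.log(softmaxRows([2, 8, 512], 2)); // [16, 512]
```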
package/dist/ops/webgpu/softmax16_program.js
@@ -1,5 +1,5 @@
- import { e } from "../../webgpu_program-DuOXPQol.js";
- import { f as o } from "../../webgpu_util-RxEF33Rj.js";
+ import { e } from "../../webgpu_program-BpWRlghH.js";
+ import { f as o } from "../../webgpu_util-DMiKzzQM.js";
  class i {
  variableNames = ["logits"];
  outputShape;
package/dist/ops/webgpu/softmax16_subgroup_program.js
@@ -1,5 +1,5 @@
- import { e as o } from "../../webgpu_program-DuOXPQol.js";
- import { f as u } from "../../webgpu_util-RxEF33Rj.js";
+ import { e as o } from "../../webgpu_program-BpWRlghH.js";
+ import { f as u } from "../../webgpu_util-DMiKzzQM.js";
  class i {
  variableNames = ["logits"];
  outputShape;
package/dist/ops/webgpu/softmax16grad.js
@@ -1,4 +1,4 @@
- import { p } from "../../index-DOvlwCh-.js";
+ import { f as p } from "../../index-Duu1Lvvv.js";
  import { createReduceInfo as m, reduce as l, ReduceProgram as i } from "./utils/reductions.js";
  import { isPackedTensor as n } from "../../utilities/packed.js";
  import k from "./utils/deviceInfo.js";
package/dist/ops/webgpu/sub16.js
@@ -1,8 +1,8 @@
- import { p as t } from "../../index-DOvlwCh-.js";
- import { BinaryOpProgram as p } from "./utils/binary_op.js";
- import { B as s } from "../../binary_op_util-pKXltfxI.js";
+ import { f as t } from "../../index-Duu1Lvvv.js";
+ import { BinaryOpProgram as s } from "./utils/binary_op.js";
+ import { B as p } from "../../binary_op_util-pKXltfxI.js";
  function c(e) {
- const { a: r, b: n } = e.inputs, o = e.backend, a = new p(s.SUB, r.shape, n.shape);
+ const { a: r, b: n } = e.inputs, o = e.backend, a = new s(p.SUB, r.shape, n.shape);
  return o.runWebGPUProgram(a, [r, n], "packedF16");
  }
  const m = {
package/dist/ops/webgpu/sum16.js
@@ -1,10 +1,10 @@
  import { createReduceInfo as g, reduce as h, ReduceProgram as x } from "./utils/reductions.js";
- import { p as k, af as A } from "../../index-DOvlwCh-.js";
+ import { f as k, af as A } from "../../index-Duu1Lvvv.js";
  import { isPackedTensor as P } from "../../utilities/packed.js";
  import { transpose16 as b } from "../transpose16.js";
  import I from "./utils/deviceInfo.js";
- import { s as w } from "../../sum-DWAtNGez.js";
- import { a as D, b as K } from "../../axis_util-BaG7mf5A.js";
+ import { s as w } from "../../sum-BPUfDB2X.js";
+ import { a as D, b as K } from "../../axis_util-DGqbT-FX.js";
  class v extends x {
  shaderKey = "sum16";
  constructor(e, t, o) {
@@ -20,14 +20,14 @@ class v extends x {
  }
  }
  function y(r) {
- const { x: e } = r.inputs, { axis: t, keepDims: o } = r.attrs, a = r.backend, c = [], p = I(a), m = P(e);
+ const { x: e } = r.inputs, { axis: t, keepDims: o } = r.attrs, a = r.backend, c = [], u = I(a), m = P(e);
  if (!m)
  return w(e, t, o);
  let n = A(t ?? -1, e.shape);
  const i = D(n, e.shape.length);
  let s = e;
  i != null && (s = b(e, i), n = K(n.length, s.shape.length), c.push(s));
- const u = g([s], -1), f = new v(p, u, m), d = h(f, [s], a);
+ const p = g([s], -1), f = new v(u, p, m), d = h(f, [s], a);
  return c.forEach((l) => l.dispose()), d;
  }
  const N = {
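sum16 only reduces along the innermost axis, so reducing over any other axis first transposes that axis to the end (the i != null branch above), runs the last-axis reduction, then disposes the temporary. A sketch of the permutation logic, assuming semantics like tfjs's getAxesPermutation (helper name is mine):

```ts
// Returns null when the axes are already innermost, else a perm that moves
// them to the end; mirrors the transpose16-then-reduce flow in the hunk.
function permToInnermost(axes: number[], rank: number): number[] | null {
  const reduced = new Set(axes);
  const kept = Array.from(Array(rank).keys()).filter((d) => !reduced.has(d));
  const innermost = axes.every((a, i) => a === rank - axes.length + i);
  return innermost ? null : [...kept, ...axes];
}

// e.g. summing axis 1 of a rank-3 tensor: transpose with [0, 2, 1] first.
console.log(permToInnermost([1], 3)); // [0, 2, 1]
```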
package/dist/ops/webgpu/transpose16.js
@@ -1,9 +1,9 @@
  import { isPackedTensor as d } from "../../utilities/packed.js";
- import { p as k, e as l } from "../../index-DOvlwCh-.js";
+ import { f as k, e as l } from "../../index-Duu1Lvvv.js";
  import { reshape16 as u } from "../reshape16.js";
  import P from "./transpose16_shared_program.js";
  import b from "./transpose16_program.js";
- import { t as T } from "../../transpose-ClWiBS_b.js";
+ import { t as T } from "../../transpose-BUkQCJp9.js";
  function w(p) {
  const { inputs: h, attrs: i } = p, { x: e } = h, { perm: r } = i, a = p.backend, m = d(e);
  if (m && r[r.length - 1] !== e.shape.length - 1) {
package/dist/ops/webgpu/transpose16_program.js
@@ -1,5 +1,5 @@
- import { f as a, c as i } from "../../webgpu_util-RxEF33Rj.js";
- import { b as h, e as d, c as n } from "../../webgpu_program-DuOXPQol.js";
+ import { f as a, c as i } from "../../webgpu_util-DMiKzzQM.js";
+ import { b as h, e as d, c as n } from "../../webgpu_program-BpWRlghH.js";
  function p(r) {
  const e = r.length;
  if (e > 6)
package/dist/ops/webgpu/transpose16_shared_program.js
@@ -1,6 +1,6 @@
- import { c as a } from "../../webgpu_util-RxEF33Rj.js";
- import { e as l } from "../../webgpu_program-DuOXPQol.js";
- import { l as p } from "../../index-DOvlwCh-.js";
+ import { c as a } from "../../webgpu_util-DMiKzzQM.js";
+ import { e as l } from "../../webgpu_program-BpWRlghH.js";
+ import { x as p } from "../../index-Duu1Lvvv.js";
  class n {
  variableNames = ["A"];
  outputShape;
package/dist/ops/webgpu/unpack16.js
@@ -1,6 +1,6 @@
- import { f as c, c as r } from "../../webgpu_util-RxEF33Rj.js";
- import { e as u } from "../../webgpu_program-DuOXPQol.js";
- import { p } from "../../index-DOvlwCh-.js";
+ import { f as c, c as r } from "../../webgpu_util-DMiKzzQM.js";
+ import { e as u } from "../../webgpu_program-BpWRlghH.js";
+ import { f as p } from "../../index-Duu1Lvvv.js";
  class l {
  outputShape;
  shaderKey = "Unpack16";
@@ -40,9 +40,9 @@ function h(e) {
  const o = [{ type: "float32", data: [1 / a] }];
  return i.runWebGPUProgram(n, [t], "float32", s ? o : void 0);
  }
- const d = {
+ const f = {
  kernelName: "Unpack16",
  backendName: "webgpu",
  kernelFunc: h
  };
- p(d);
+ p(f);
package/dist/ops/webgpu/utils/binary_op.js
@@ -1,8 +1,8 @@
- import { f as s, c as n } from "../../../webgpu_util-RxEF33Rj.js";
+ import { f as s, c as n } from "../../../webgpu_util-DMiKzzQM.js";
  import { g as p } from "../../../binary_op_util-pKXltfxI.js";
  import { B as b } from "../../../binary_op_util-pKXltfxI.js";
- import { a3 as u, ag as r } from "../../../index-DOvlwCh-.js";
- import { e as l } from "../../../webgpu_program-DuOXPQol.js";
+ import { _ as u, ag as r } from "../../../index-Duu1Lvvv.js";
+ import { e as l } from "../../../webgpu_program-BpWRlghH.js";
  class x {
  dispatch;
  dispatchLayout;
package/dist/ops/webgpu/utils/reductions.js
@@ -1,8 +1,8 @@
- import { af as l, h as c, e as S } from "../../../index-DOvlwCh-.js";
- import { e as a } from "../../../webgpu_program-DuOXPQol.js";
+ import { af as l, U as c, e as S } from "../../../index-Duu1Lvvv.js";
+ import { e as a } from "../../../webgpu_program-BpWRlghH.js";
  import { reshape16 as b } from "../../reshape16.js";
- import { f } from "../../../webgpu_util-RxEF33Rj.js";
- import { c as h } from "../../../axis_util-BaG7mf5A.js";
+ import { f } from "../../../webgpu_util-DMiKzzQM.js";
+ import { c as h } from "../../../axis_util-DGqbT-FX.js";
  function d(e, u, t, i) {
  return e && !u ? `
  bestValue = subgroupAdd(bestValue);