@genai-fi/nanogpt 0.10.2 → 0.10.3

This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
Files changed (249)
  1. package/dist/Generator.js +11761 -171
  2. package/dist/{RealDiv-zz7FpkKX.js → RealDiv-KAPDe8zB.js} +23 -25
  3. package/dist/Reshape-BYkmUnAv.js +14 -0
  4. package/dist/{Reshape-CHdUjC72.js → Reshape-Zt6eb7yh.js} +18 -20
  5. package/dist/TeachableLLM.js +10 -11
  6. package/dist/{axis_util-BsIr9ZNu.js → axis_util-BaG7mf5A.js} +3 -3
  7. package/dist/backend.js +2 -2
  8. package/dist/{backend_util-B1XRLuq9.js → backend_util-RCe-rHaj.js} +72 -73
  9. package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-DE3ACOLx.js} +45 -47
  10. package/dist/broadcast_to-B3eYlZm7.js +28 -0
  11. package/dist/checks/appendCache.js +2 -2
  12. package/dist/checks/attentionMask.js +3 -3
  13. package/dist/checks/gelu.js +2 -2
  14. package/dist/checks/matMulGelu.js +7 -11
  15. package/dist/checks/normRMS.js +9 -9
  16. package/dist/checks/normRMSGrad.js +3 -3
  17. package/dist/checks/packUnpack.js +2 -2
  18. package/dist/checks/qkv.js +12 -13
  19. package/dist/checks/rope.js +2 -2
  20. package/dist/clip_by_value-BnO7-a88.js +12 -0
  21. package/dist/complex-DjxcVmoX.js +11 -0
  22. package/dist/concat-BV8bt5H-.js +17 -0
  23. package/dist/{concat_util-iBYIyuQe.js → concat_util-DpW8mL_l.js} +1 -1
  24. package/dist/{dataset-D2P7rHAw.js → dataset-BcwmTGYc.js} +137 -139
  25. package/dist/dropout-BcvN9JYi.js +92 -0
  26. package/dist/expand_dims-DT4tEPwA.js +11 -0
  27. package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-Hta_rEnm.js} +1 -1
  28. package/dist/floor-D5QdR_le.js +9 -0
  29. package/dist/gather-D3JcZUaI.js +9 -0
  30. package/dist/{gelu-Bmhopi0J.js → gelu-CjNPL4OH.js} +10 -11
  31. package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-DAOmgtXR.js} +841 -1015
  32. package/dist/{index-DRyE072i.js → index-BwexR4lA.js} +262 -263
  33. package/dist/index-DOvlwCh-.js +3520 -0
  34. package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CCzYdUZg.js} +130 -132
  35. package/dist/layers/BaseLayer.js +15 -16
  36. package/dist/layers/CausalSelfAttention.js +6 -6
  37. package/dist/layers/MLP.js +4 -4
  38. package/dist/layers/PositionEmbedding.js +7 -7
  39. package/dist/layers/RMSNorm.js +3 -3
  40. package/dist/layers/RoPECache.js +9 -9
  41. package/dist/layers/TiedEmbedding.js +6 -6
  42. package/dist/layers/TransformerBlock.js +1 -1
  43. package/dist/loader/loadTransformers.js +1 -1
  44. package/dist/loader/oldZipLoad.js +13 -14
  45. package/dist/log_sum_exp-ngO0-4pK.js +39 -0
  46. package/dist/main.js +49 -50
  47. package/dist/{matMul16-fEAJ4smh.js → matMul16-BWRSOCWB.js} +14 -15
  48. package/dist/matMulGelu-CzfgT6Wq.js +163 -0
  49. package/dist/mat_mul-SjpJRLyL.js +11 -0
  50. package/dist/mod-AnXEvvpo.js +11 -0
  51. package/dist/models/NanoGPTV1.js +2 -2
  52. package/dist/models/model.js +13 -14
  53. package/dist/ones-D2rT0xk2.js +14 -0
  54. package/dist/ops/adamAdjust.js +1 -1
  55. package/dist/ops/adamMoments.js +1 -1
  56. package/dist/ops/add16.js +1 -1
  57. package/dist/ops/appendCache.js +3 -3
  58. package/dist/ops/attentionMask.js +1 -1
  59. package/dist/ops/concat16.js +2 -2
  60. package/dist/ops/cpu/adamAdjust.js +13 -14
  61. package/dist/ops/cpu/adamMoments.js +6 -7
  62. package/dist/ops/cpu/appendCache.js +7 -8
  63. package/dist/ops/cpu/attentionMask.js +7 -7
  64. package/dist/ops/cpu/fusedSoftmax.js +10 -11
  65. package/dist/ops/cpu/gatherSub.js +9 -10
  66. package/dist/ops/cpu/gelu.js +9 -10
  67. package/dist/ops/cpu/matMul16.js +6 -7
  68. package/dist/ops/cpu/matMulGelu.js +5 -6
  69. package/dist/ops/cpu/matMulMul.js +3 -4
  70. package/dist/ops/cpu/mulDropout.js +3 -4
  71. package/dist/ops/cpu/normRMS.js +10 -11
  72. package/dist/ops/cpu/qkv.js +8 -9
  73. package/dist/ops/cpu/rope.js +5 -6
  74. package/dist/ops/cpu/scatterSub.js +17 -19
  75. package/dist/ops/dot16.js +2 -2
  76. package/dist/ops/gatherSub.js +1 -1
  77. package/dist/ops/gelu.js +2 -2
  78. package/dist/ops/grads/add16.js +11 -12
  79. package/dist/ops/grads/attentionMask.js +5 -6
  80. package/dist/ops/grads/gelu.js +3 -4
  81. package/dist/ops/grads/matMul16.js +4 -5
  82. package/dist/ops/grads/matMulGelu.js +9 -10
  83. package/dist/ops/grads/normRMS.js +7 -8
  84. package/dist/ops/grads/pack16.js +4 -5
  85. package/dist/ops/grads/qkv.js +17 -19
  86. package/dist/ops/grads/rope.js +3 -5
  87. package/dist/ops/grads/softmax16.js +3 -4
  88. package/dist/ops/grads/unpack16.js +3 -4
  89. package/dist/ops/grads/utils.d.ts +1 -0
  90. package/dist/ops/grads/utils.js +8 -4
  91. package/dist/ops/matMul16.js +3 -3
  92. package/dist/ops/matMulGelu.js +2 -2
  93. package/dist/ops/matMulMul.js +1 -1
  94. package/dist/ops/mul16.js +1 -1
  95. package/dist/ops/mulDrop.js +1 -1
  96. package/dist/ops/normRMS.js +1 -1
  97. package/dist/ops/pack16.js +3 -4
  98. package/dist/ops/qkv.js +4 -8
  99. package/dist/ops/reshape16.js +14 -16
  100. package/dist/ops/rope.d.ts +1 -1
  101. package/dist/ops/rope.js +3 -8
  102. package/dist/ops/scatterSub.js +1 -1
  103. package/dist/ops/slice16.js +2 -2
  104. package/dist/ops/softmax16.js +5 -8
  105. package/dist/ops/sub16.js +1 -1
  106. package/dist/ops/sum16.js +2 -2
  107. package/dist/ops/transpose16.js +23 -24
  108. package/dist/ops/unpack16.js +2 -2
  109. package/dist/ops/webgl/adamAdjust.js +2 -3
  110. package/dist/ops/webgl/adamMoments.js +1 -2
  111. package/dist/ops/webgl/appendCache.js +1 -2
  112. package/dist/ops/webgl/attentionMask.js +4 -5
  113. package/dist/ops/webgl/fusedSoftmax.js +4 -6
  114. package/dist/ops/webgl/gatherSub.js +6 -7
  115. package/dist/ops/webgl/gelu.js +2 -3
  116. package/dist/ops/webgl/log.js +11 -12
  117. package/dist/ops/webgl/matMul16.js +10 -11
  118. package/dist/ops/webgl/matMulGelu.js +7 -111
  119. package/dist/ops/webgl/matMulMul.js +9 -10
  120. package/dist/ops/webgl/mulDropout.js +8 -9
  121. package/dist/ops/webgl/normRMS.js +2 -3
  122. package/dist/ops/webgl/qkv.js +5 -6
  123. package/dist/ops/webgl/rope.js +7 -8
  124. package/dist/ops/webgl/scatterSub.js +5 -6
  125. package/dist/ops/webgpu/adamAdjust.js +10 -12
  126. package/dist/ops/webgpu/adamMoments.js +8 -10
  127. package/dist/ops/webgpu/add16.js +8 -9
  128. package/dist/ops/webgpu/appendCache.js +23 -25
  129. package/dist/ops/webgpu/attentionMask.js +8 -10
  130. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  131. package/dist/ops/webgpu/concat16.js +12 -14
  132. package/dist/ops/webgpu/gatherSub.js +11 -13
  133. package/dist/ops/webgpu/gelu.js +28 -29
  134. package/dist/ops/webgpu/matMul16.js +26 -28
  135. package/dist/ops/webgpu/matMul16_program.js +4 -5
  136. package/dist/ops/webgpu/mul16.js +9 -10
  137. package/dist/ops/webgpu/normRMS.js +15 -17
  138. package/dist/ops/webgpu/normRMSGrad.js +21 -28
  139. package/dist/ops/webgpu/pack16.js +12 -13
  140. package/dist/ops/webgpu/pack16_program.js +2 -2
  141. package/dist/ops/webgpu/qkv.js +16 -18
  142. package/dist/ops/webgpu/rope.js +25 -27
  143. package/dist/ops/webgpu/scatterSub.js +7 -9
  144. package/dist/ops/webgpu/slice16.js +21 -23
  145. package/dist/ops/webgpu/softmax16.js +17 -19
  146. package/dist/ops/webgpu/softmax16_program.js +2 -2
  147. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  148. package/dist/ops/webgpu/softmax16grad.js +7 -8
  149. package/dist/ops/webgpu/sub16.js +7 -8
  150. package/dist/ops/webgpu/sum16.js +18 -20
  151. package/dist/ops/webgpu/transpose16.js +19 -20
  152. package/dist/ops/webgpu/transpose16_program.js +2 -2
  153. package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
  154. package/dist/ops/webgpu/unpack16.js +3 -4
  155. package/dist/ops/webgpu/utils/binary_op.js +7 -8
  156. package/dist/ops/webgpu/utils/reductions.js +14 -22
  157. package/dist/ops-B5yanEdW.js +476 -0
  158. package/dist/pack16-nQ6JaLo-.js +39 -0
  159. package/dist/patches/webgpu_backend.js +19 -20
  160. package/dist/patches/webgpu_base.js +1 -1
  161. package/dist/patches/webgpu_program.js +21 -22
  162. package/dist/{random_width-BVV9HveY.js → random_width-or-CEftb.js} +2506 -2761
  163. package/dist/range-BklejeeW.js +10 -0
  164. package/dist/relu-CP0ZcxWO.js +9 -0
  165. package/dist/reshape-ByE68wS9.js +9 -0
  166. package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
  167. package/dist/rope-Ir4mTyD1.js +24 -0
  168. package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-lvSiX8q4.js} +1 -1
  169. package/dist/selu_util-kbhpTdYD.js +44 -0
  170. package/dist/{shared-CHhxz-O5.js → shared-DT1TkE6w.js} +1 -1
  171. package/dist/{shared-D2NP_CpY.js → shared-dntlHIDQ.js} +343 -345
  172. package/dist/slice-BfEGSH82.js +12 -0
  173. package/dist/{slice_util-DyjSAD0u.js → slice_util-uTKwiEpW.js} +1 -1
  174. package/dist/{softmax-C9JQEtnO.js → softmax-CA5jFsLR.js} +4 -5
  175. package/dist/split-CVLc0w--.js +9 -0
  176. package/dist/squeeze-C7Z2srUo.js +10 -0
  177. package/dist/stack-Cf4n9h0N.js +11 -0
  178. package/dist/step-CINUs5QB.js +261 -0
  179. package/dist/sum-DWAtNGez.js +11 -0
  180. package/dist/tensor-DJoc7gJU.js +8 -0
  181. package/dist/tensor1d-D11P_7Dp.js +11 -0
  182. package/dist/{tensor2d-CSB4KOb0.js → tensor2d-Bs9wZRc7.js} +6 -7
  183. package/dist/{tensor4d-D7bLqGqz.js → tensor4d-BARPdTaS.js} +6 -7
  184. package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-y1cvNhLA.js} +255 -264
  185. package/dist/tile-mbfagpsB.js +11 -0
  186. package/dist/training/Adam.js +2 -2
  187. package/dist/training/AdamExt.js +1 -1
  188. package/dist/training/DatasetBuilder.js +2 -2
  189. package/dist/training/FullTrainer.js +1 -1
  190. package/dist/training/Trainer.js +2 -2
  191. package/dist/training/sparseCrossEntropy.js +5 -5
  192. package/dist/transpose-ClWiBS_b.js +36 -0
  193. package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
  194. package/dist/utilities/dummy.js +3 -3
  195. package/dist/utilities/multinomialCPU.js +2 -2
  196. package/dist/utilities/packed.d.ts +1 -4
  197. package/dist/utilities/packed.js +10 -745
  198. package/dist/utilities/performance.js +1 -1
  199. package/dist/utilities/profile.js +1 -1
  200. package/dist/utilities/safetensors.js +2 -2
  201. package/dist/utilities/sentences.js +5 -5
  202. package/dist/utilities/weights.js +2 -2
  203. package/dist/{variable-DzfrwYuP.js → variable-WawDEaAb.js} +1 -1
  204. package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-DuOXPQol.js} +178 -172
  205. package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-RxEF33Rj.js} +34 -35
  206. package/dist/zeros-KnWaWf-X.js +13 -0
  207. package/dist/zeros_like-DvE73F4e.js +721 -0
  208. package/package.json +4 -2
  209. package/dist/Reshape-CDVLyVfz.js +0 -16
  210. package/dist/broadcast_to-B0ChcDaz.js +0 -30
  211. package/dist/complex-BBiRlsVq.js +0 -13
  212. package/dist/concat-DmBLPVGC.js +0 -19
  213. package/dist/dropout-B1x1kYMa.js +0 -99
  214. package/dist/expand_dims-ouvfxQ1n.js +0 -13
  215. package/dist/gather-CH9sdacz.js +0 -10
  216. package/dist/index-D6Q1lPZO.js +0 -2157
  217. package/dist/log_sum_exp-D3ftBNY5.js +0 -41
  218. package/dist/mat_mul-C59XWcJd.js +0 -12
  219. package/dist/mod-DESSvHIU.js +0 -12
  220. package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
  221. package/dist/ones-jU9jlQvM.js +0 -15
  222. package/dist/ops-BFDtP6th.js +0 -645
  223. package/dist/pack16-CmVZs6af.js +0 -41
  224. package/dist/patches/PackedTensor.d.ts +0 -12
  225. package/dist/patches/PackedTensor.js +0 -11
  226. package/dist/patches/engine.d.ts +0 -261
  227. package/dist/patches/engine.js +0 -12
  228. package/dist/patches/tape.d.ts +0 -12
  229. package/dist/patches/tape.js +0 -5
  230. package/dist/range-ZZZD60Fx.js +0 -11
  231. package/dist/reciprocal-CrYlsAGD.js +0 -10
  232. package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
  233. package/dist/relu-BYDneVPn.js +0 -10
  234. package/dist/reshape-CaPQzFvz.js +0 -10
  235. package/dist/rope-s4W2XO9B.js +0 -32
  236. package/dist/selu_util-BGPXmd4B.js +0 -303
  237. package/dist/sin-Djs4aQiu.js +0 -16
  238. package/dist/slice-DvovR5wq.js +0 -13
  239. package/dist/split-DBck65sX.js +0 -10
  240. package/dist/squeeze-C00Ipm_7.js +0 -11
  241. package/dist/stack-ChnHwRpX.js +0 -13
  242. package/dist/sum-ywRJj3Zr.js +0 -12
  243. package/dist/tensor-0r5yOo2R.js +0 -8
  244. package/dist/tensor-CzmOBsdf.js +0 -909
  245. package/dist/tensor1d-BlUT89BP.js +0 -12
  246. package/dist/tensor_util-DfwaWayG.js +0 -523
  247. package/dist/tile-CR074jmp.js +0 -13
  248. package/dist/transpose-DH4gmHvu.js +0 -38
  249. package/dist/zeros-DBFVbpv5.js +0 -14
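Note on the hunks excerpted below (files 133–143 in the list above): they all follow the same refactor. In 0.10.2, WebGPU kernels returned packed-f16 data as "int32" tensors and tagged the result with an ad-hoc `packed` flag; in 0.10.3 the backend accepts a first-class "packedF16" dtype, so the flag disappears. Kernel registration also moves from the removed tensor_util-DfwaWayG.js chunk to an export of the new index-DOvlwCh-.js chunk. A de-minified sketch of the before/after pattern (names such as `PackedGeluGradProgram` are illustrative stand-ins for the minified identifiers, not the package's API):

```js
// 0.10.2: packed f16 results were typed "int32" plus a side-channel flag.
function geluGradKernelOld({ inputs, backend }) {
  const packed = isPackedTensor(inputs.dy);
  const program = packed
    ? new PackedGeluGradProgram(inputs.x.shape)
    : new GeluGradProgram(inputs.x.shape);
  const out = backend.runWebGPUProgram(
    program, [inputs.dy, inputs.x], packed ? "int32" : "float32");
  out.packed = packed; // every consumer had to know to check this flag
  return out;
}

// 0.10.3: "packedF16" is a real dtype, so no flag is needed.
function geluGradKernelNew({ inputs, backend }) {
  const packed = isPackedTensor(inputs.dy);
  const program = packed
    ? new PackedGeluGradProgram(inputs.x.shape)
    : new GeluGradProgram(inputs.x.shape);
  return backend.runWebGPUProgram(
    program, [inputs.dy, inputs.x], packed ? "packedF16" : "float32");
}
```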
package/dist/ops/webgpu/gelu.js
@@ -1,10 +1,9 @@
- import "../../index-D6Q1lPZO.js";
- import { e as s } from "../../webgpu_program-DzaQiqel.js";
- import { f as o, c as p } from "../../webgpu_util-0_ubCEHJ.js";
+ import { p as d } from "../../index-DOvlwCh-.js";
+ import { e as s } from "../../webgpu_program-DuOXPQol.js";
+ import { f as n, c as o } from "../../webgpu_util-RxEF33Rj.js";
  import { isPackedTensor as l } from "../../utilities/packed.js";
- import { r as h } from "../../tensor_util-DfwaWayG.js";
- const r = 0.7978845608028654, u = 0.044715;
- class x {
+ const u = 0.7978845608028654, r = 0.044715;
+ class c {
  outputShape;
  shaderKey;
  dispatchLayout;
@@ -13,7 +12,7 @@ class x {
  workgroupSize;
  size = !0;
  constructor(e) {
- this.workgroupSize = [128, 1, 1], this.outputShape = e, this.dispatchLayout = o(this.outputShape), this.dispatch = p(this.dispatchLayout, this.outputShape, this.workgroupSize), this.shaderKey = "unary_gelu";
+ this.workgroupSize = [128, 1, 1], this.outputShape = e, this.dispatchLayout = n(this.outputShape), this.dispatch = o(this.dispatchLayout, this.outputShape, this.workgroupSize), this.shaderKey = "unary_gelu";
  }
  getUserCode() {
  return `
@@ -23,8 +22,8 @@ class x {
  }
  fn unaryOperation(x : f32) -> f32 {
  let x3 = x * x * x;
- var inner = fma(${u}, x3, x);
- inner = ${r} * inner;
+ var inner = fma(${r}, x3, x);
+ inner = ${u} * inner;
  inner = tanhComplete(inner);
  inner = 0.5 * (1.0 + inner);
  return x * inner;
@@ -38,17 +37,17 @@ class x {
  `;
  }
  }
- function g(t) {
- const { x: e } = t.inputs, a = t.backend, i = new x(e.shape);
+ function x(t) {
+ const { x: e } = t.inputs, a = t.backend, i = new c(e.shape);
  return a.runWebGPUProgram(i, [e], "float32");
  }
- const f = {
+ const g = {
  kernelName: "Gelu",
  backendName: "webgpu",
- kernelFunc: g
+ kernelFunc: x
  };
- h(f);
- class m {
+ d(g);
+ class f {
  // Inputs: dy, x
  variableNames = ["dy", "x"];
  outputShape;
@@ -58,7 +57,7 @@ class m {
  workgroupSize = [128, 1, 1];
  size = !0;
  constructor(e) {
- this.outputShape = e, this.dispatchLayout = o(this.outputShape), this.dispatch = p(this.dispatchLayout, this.outputShape, this.workgroupSize);
+ this.outputShape = e, this.dispatchLayout = n(this.outputShape), this.dispatch = o(this.dispatchLayout, this.outputShape, this.workgroupSize);
  }
  getUserCode() {
  return `
@@ -69,10 +68,10 @@ class m {
  fn activationGrad(dy: f32, X: f32) -> f32 {
  let x2 = X * X;
  let x3 = x2 * X;
- let u = ${r} * (X + ${u} * x3);
+ let u = ${u} * (X + ${r} * x3);
  let t = tanhComplete(u);
  let sech2 = 1.0 - t * t;
- let du_dx = ${r} * (1.0 + 3.0 * ${u} * x2);
+ let du_dx = ${u} * (1.0 + 3.0 * ${r} * x2);
  let dgelu = 0.5 * (1.0 + t) + 0.5 * X * sech2 * du_dx;
  return dy *dgelu;
  }
@@ -89,7 +88,7 @@ class m {
  }`;
  }
  }
- class y {
+ class m {
  // Inputs: dy, x
  variableNames = ["dy", "x"];
  outputShape;
@@ -99,7 +98,7 @@ class y {
  workgroupSize = [128, 1, 1];
  size = !0;
  constructor(e) {
- this.outputShape = e, this.dispatchLayout = o(this.outputShape), this.dispatch = p(this.dispatchLayout, this.outputShape, this.workgroupSize);
+ this.outputShape = e, this.dispatchLayout = n(this.outputShape), this.dispatch = o(this.dispatchLayout, this.outputShape, this.workgroupSize);
  }
  getUserCode() {
  return `
@@ -110,10 +109,10 @@ class y {
  fn activationGrad(dy: f32, X: f32) -> f32 {
  let x2 = X * X;
  let x3 = x2 * X;
- let u = ${r} * (X + ${u} * x3);
+ let u = ${u} * (X + ${r} * x3);
  let t = tanhComplete(u);
  let sech2 = 1.0 - t * t;
- let du_dx = ${r} * (1.0 + 3.0 * ${u} * x2);
+ let du_dx = ${u} * (1.0 + 3.0 * ${r} * x2);
  let dgelu = 0.5 * (1.0 + t) + 0.5 * X * sech2 * du_dx;
  return dy *dgelu;
  }
@@ -127,16 +126,16 @@ class y {
  }`;
  }
  }
- function b(t) {
- const { dy: e, x: a } = t.inputs, i = t.backend, n = l(e), c = n ? new m(a.shape) : new y(a.shape), d = i.runWebGPUProgram(c, [e, a], n ? "int32" : "float32");
- return d.packed = n, d;
+ function y(t) {
+ const { dy: e, x: a } = t.inputs, i = t.backend, p = l(e), h = p ? new f(a.shape) : new m(a.shape);
+ return i.runWebGPUProgram(h, [e, a], p ? "packedF16" : "float32");
  }
- const k = {
+ const b = {
  kernelName: "GeluGrad",
  backendName: "webgpu",
- kernelFunc: b
+ kernelFunc: y
  };
- h(k);
+ d(b);
  export {
- x as GeluProgram
+ c as GeluProgram
  };
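The shader above implements the tanh approximation of GELU; the 0.10.3 change only swaps which minified constant (`r`/`u`) holds sqrt(2/π) ≈ 0.7978845608028654 versus the cubic coefficient 0.044715, plus the "packedF16" dtype change described earlier. A plain-JS restatement of the math the WGSL computes, useful for checking kernel output (a sketch, not package code):

```js
const SQRT_2_OVER_PI = 0.7978845608028654; // sqrt(2 / pi)
const CUBIC_COEF = 0.044715;

// gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
function gelu(x) {
  const inner = SQRT_2_OVER_PI * (x + CUBIC_COEF * x ** 3);
  return 0.5 * x * (1 + Math.tanh(inner));
}

// Matches the shader's activationGrad: differentiate gelu(x), scale by dy.
function geluGrad(dy, x) {
  const u = SQRT_2_OVER_PI * (x + CUBIC_COEF * x ** 3);
  const t = Math.tanh(u);
  const sech2 = 1 - t * t;
  const duDx = SQRT_2_OVER_PI * (1 + 3 * CUBIC_COEF * x * x);
  return dy * (0.5 * (1 + t) + 0.5 * x * sech2 * duDx);
}
```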
package/dist/ops/webgpu/matMul16.js
@@ -1,34 +1,32 @@
- import { m as y, b as B, j as Q } from "../../index-D6Q1lPZO.js";
+ import { p as Q, m as P, b as B, h as y, a3 as V } from "../../index-DOvlwCh-.js";
  import { isPackedTensor as R } from "../../utilities/packed.js";
  import { reshape16 as U } from "../reshape16.js";
- import { matMulMul as V } from "../matMulMul.js";
- import { matMulGelu as X } from "../matMulGelu.js";
- import Y from "./matMul16_program.js";
- import { r as Z } from "../../tensor_util-DfwaWayG.js";
- import { m as _ } from "../../mat_mul-C59XWcJd.js";
- import { r as x } from "../../reshape-CaPQzFvz.js";
- import { t as C } from "../../transpose-DH4gmHvu.js";
- import { s as E } from "../../tensor-CzmOBsdf.js";
+ import { matMulMul as X } from "../matMulMul.js";
+ import { matMulGelu as Y } from "../matMulGelu.js";
+ import Z from "./matMul16_program.js";
+ import { m as _ } from "../../mat_mul-SjpJRLyL.js";
+ import { r as x } from "../../reshape-ByE68wS9.js";
+ import { t as C } from "../../transpose-ClWiBS_b.js";
  function $(p) {
- const { A: e, B: s } = p.inputs, { transposeA: d, transposeB: f, scale: i, activation: k, scaleA: c, scaleB: u, forceOutputShape: o, perm: m, causalMask: g, pastLen: W } = p.attrs, z = p.backend, S = !R(e), M = !R(s);
+ const { A: e, B: s } = p.inputs, { transposeA: d, transposeB: f, scale: i, activation: k, scaleA: c, scaleB: u, forceOutputShape: o, perm: h, causalMask: g, pastLen: E } = p.attrs, F = p.backend, S = !R(e), M = !R(s);
  if (S && M) {
- const A = c !== void 0 ? y(e, B(c)) : e, b = u !== void 0 ? y(s, B(u)) : s;
+ const A = c !== void 0 ? P(e, B(c)) : e, b = u !== void 0 ? P(s, B(u)) : s;
  if (g)
  throw new Error("Causal mask is not supported for unpacked MatMul16.");
  let a;
- if (i !== void 0 ? a = V(A, b, B(i), d, f) : k === "gelu" ? a = X(A, b) : a = _(A, b, d, f), m)
+ if (i !== void 0 ? a = X(A, b, B(i), d, f) : k === "gelu" ? a = Y(A, b) : a = _(A, b, d, f), h)
  if (o) {
- const n = x(a, o);
+ const r = x(a, o);
  a.dispose();
- const J = C(n, m);
- return n.dispose(), J;
+ const J = C(r, h);
+ return r.dispose(), J;
  } else {
- const n = C(a, m);
- return a.dispose(), n;
+ const r = C(a, h);
+ return a.dispose(), r;
  }
  else if (o) {
- const n = x(a, o);
- return a.dispose(), n;
+ const r = x(a, o);
+ return a.dispose(), r;
  } else
  return a;
  }
@@ -36,23 +34,23 @@ function $(p) {
  throw new Error("When using mixed precision, A must be packed if B is packed.");
  if (!S && M)
  throw new Error("When using mixed precision, B must be packed if A is packed.");
- const h = e.shape.length, l = s.shape.length, F = e.shape.slice(0, -2), I = s.shape.slice(0, -2), v = E(F), w = E(I), N = Q(e.shape.slice(0, -2), s.shape.slice(0, -2)), j = Math.max(v, w), K = e.shape[h - 2], L = s.shape[l - 2], T = e.shape[h - 1] * 2, q = s.shape[l - 1] * 2, D = U(e, [v, e.shape[h - 2], e.shape[h - 1]]), G = U(s, [w, s.shape[l - 2], s.shape[l - 1]]), t = new Y(j, K, L, T, q, d, f), r = [];
- i !== void 0 && (t.useScale(), r.push({ type: "float32", data: [i] })), c !== void 0 && (t.useScaleA(), r.push({ type: "float32", data: [c] })), u !== void 0 && (t.useScaleB(), r.push({ type: "float32", data: [u] })), k !== void 0 && t.useActivation(k), g && (t.useCausalMask(), r.push({ type: "int32", data: [W || 0] }));
+ const l = e.shape.length, m = s.shape.length, W = e.shape.slice(0, -2), z = s.shape.slice(0, -2), v = y(W), w = y(z), I = V(e.shape.slice(0, -2), s.shape.slice(0, -2)), N = Math.max(v, w), K = e.shape[l - 2], L = s.shape[m - 2], T = e.shape[l - 1] * 2, j = s.shape[m - 1] * 2, D = U(e, [v, e.shape[l - 2], e.shape[l - 1]]), G = U(s, [w, s.shape[m - 2], s.shape[m - 1]]), t = new Z(N, K, L, T, j, d, f), n = [];
+ i !== void 0 && (t.useScale(), n.push({ type: "float32", data: [i] })), c !== void 0 && (t.useScaleA(), n.push({ type: "float32", data: [c] })), u !== void 0 && (t.useScaleB(), n.push({ type: "float32", data: [u] })), k !== void 0 && t.useActivation(k), g && (t.useCausalMask(), n.push({ type: "int32", data: [E || 0] }));
  const O = t.outputShape.length;
  o && (p.attrs.originalShape = t.outputShape);
- const H = o ?? N.concat([t.outputShape[O - 2], t.outputShape[O - 1]]);
- t.setOutputShape(H, m);
- const P = z.runWebGPUProgram(
+ const q = o ?? I.concat([t.outputShape[O - 2], t.outputShape[O - 1]]);
+ t.setOutputShape(q, h);
+ const H = F.runWebGPUProgram(
  t,
  [D, G],
- "int32",
- r.length > 0 ? r : void 0
+ "packedF16",
+ n.length > 0 ? n : void 0
  );
- return P.packed = !0, D.dispose(), G.dispose(), P;
+ return D.dispose(), G.dispose(), H;
  }
  const ee = {
  kernelName: "MatMul16",
  backendName: "webgpu",
  kernelFunc: $
  };
- Z(ee);
+ Q(ee);
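For orientation, the packed branch above collapses all leading dimensions into a single batch axis before dispatch, and because each stored element holds two f16 values, the program is constructed with the logical (doubled) inner dimensions. A rough de-minified reading of that setup, with descriptive names substituted for the minified `y`/`V`/`Z` imports (an interpretation, not package source):

```js
// batchA/batchB are products of the leading dims; operands become rank 3.
const batchA = sizeFromShape(A.shape.slice(0, -2));
const batchB = sizeFromShape(B.shape.slice(0, -2));
const outBatchShape = assertAndGetBroadcastShape(
  A.shape.slice(0, -2), B.shape.slice(0, -2));
const a3 = reshape16(A, [batchA, A.shape.at(-2), A.shape.at(-1)]);
const b3 = reshape16(B, [batchB, B.shape.at(-2), B.shape.at(-1)]);
// Last dims are doubled: one packed element carries two f16 values.
const program = new MatMul16Program(
  Math.max(batchA, batchB),
  A.shape.at(-2), B.shape.at(-2),
  A.shape.at(-1) * 2, B.shape.at(-1) * 2,
  transposeA, transposeB);
```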
package/dist/ops/webgpu/matMul16_program.js
@@ -1,7 +1,6 @@
- import "../../index-D6Q1lPZO.js";
- import { e as h } from "../../webgpu_program-DzaQiqel.js";
- import { s as f } from "../../tensor-CzmOBsdf.js";
- class A {
+ import { h as f } from "../../index-DOvlwCh-.js";
+ import { e as h } from "../../webgpu_program-DuOXPQol.js";
+ class B {
  variableNames = ["A", "B"];
  outputShape;
  shaderKey = "MatMul16TB";
@@ -332,5 +331,5 @@ class A {
  }
  }
  export {
- A as default
+ B as default
  };
package/dist/ops/webgpu/mul16.js
@@ -1,14 +1,13 @@
- import "../../index-D6Q1lPZO.js";
- import { BinaryOpProgram as m } from "./utils/binary_op.js";
- import { B as p } from "../../binary_op_util-pKXltfxI.js";
- import { r as c } from "../../tensor_util-DfwaWayG.js";
- function i(r) {
- const { a: e, b: n } = r.inputs, t = r.backend, a = new m(p.MUL, e.shape, n.shape), o = t.runWebGPUProgram(a, [e, n], "int32");
- return o.packed = !0, o;
+ import { p as t } from "../../index-DOvlwCh-.js";
+ import { BinaryOpProgram as p } from "./utils/binary_op.js";
+ import { B as m } from "../../binary_op_util-pKXltfxI.js";
+ function s(e) {
+ const { a: r, b: n } = e.inputs, o = e.backend, a = new p(m.MUL, r.shape, n.shape);
+ return o.runWebGPUProgram(a, [r, n], "packedF16");
  }
- const s = {
+ const c = {
  kernelName: "Mul16",
  backendName: "webgpu",
- kernelFunc: i
+ kernelFunc: s
  };
- c(s);
+ t(c);
package/dist/ops/webgpu/normRMS.js
@@ -1,30 +1,28 @@
- import "../../index-D6Q1lPZO.js";
- import { createReduceInfo as g, reduce as l } from "./utils/reductions.js";
- import { j as w } from "../../tensor-CzmOBsdf.js";
- import { isPackedTensor as f } from "../../utilities/packed.js";
- import { p as k } from "../../pack16-CmVZs6af.js";
- import S from "./normRMS16_program.js";
+ import { p as g, ab as l } from "../../index-DOvlwCh-.js";
+ import { createReduceInfo as w, reduce as S } from "./utils/reductions.js";
+ import { isPackedTensor as d } from "../../utilities/packed.js";
+ import { p as f } from "../../pack16-nQ6JaLo-.js";
+ import b from "./normRMS16_program.js";
  import z from "./normRMS32_program.js";
  import N from "./utils/deviceInfo.js";
- import { r as b } from "../../tensor_util-DfwaWayG.js";
- function P(m) {
- const { x: e, gamma: n } = m.inputs, c = m.backend, i = N(c), s = f(e), a = f(n), o = s || a, r = !o || s ? e : k(e), p = !o || a ? n : k(n), h = [r, p], t = g(h, -1), u = o ? new S(i, t) : new z(i, t);
- if (w(p.shape, [r.shape[r.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
+ function P(c) {
+ const { x: e, gamma: s } = c.inputs, m = c.backend, i = N(m), t = d(e), a = d(s), n = t || a, r = !n || t ? e : f(e), p = !n || a ? s : f(s), h = [r, p], o = w(h, -1), u = n ? new b(i, o) : new z(i, o);
+ if (l(p.shape, [r.shape[r.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
  throw new Error(`rmsNormGPU: input rank ${e.shape.length} not supported, only rank 3 is supported`);
- if (t.inSize !== r.shape[r.shape.length - 1])
+ if (o.inSize !== r.shape[r.shape.length - 1])
  throw new Error(
- `rmsNormGPU: reduction size ${t.inSize} does not match expected size ${r.shape[r.shape.length - 1]}`
+ `rmsNormGPU: reduction size ${o.inSize} does not match expected size ${r.shape[r.shape.length - 1]}`
  );
- if (t.batchSize !== e.shape[0] * e.shape[1])
+ if (o.batchSize !== e.shape[0] * e.shape[1])
  throw new Error(
- `rmsNormGPU: batch size ${t.batchSize} does not match expected size ${e.shape[0] * e.shape[1]}`
+ `rmsNormGPU: batch size ${o.batchSize} does not match expected size ${e.shape[0] * e.shape[1]}`
  );
- const d = l(u, h, c);
- return d.packed = o, o && !s && r.dispose(), o && !a && p.dispose(), d;
+ const k = S(u, h, m);
+ return n && !t && r.dispose(), n && !a && p.dispose(), k;
  }
  const G = {
  kernelName: "RMSNorm",
  backendName: "webgpu",
  kernelFunc: P
  };
- b(G);
+ g(G);
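As a reminder of what this kernel computes: RMS normalization over the last axis, scaled by a per-channel gamma whose length must match that axis (the shape check above). A reference sketch in plain JS; the epsilon value lives inside the shader and is an assumption here:

```js
// y[i] = gamma[i] * x[i] / sqrt(mean(x^2) + eps), over the last axis.
function rmsNormRow(row, gamma, eps = 1e-6 /* assumed */) {
  const meanSq = row.reduce((acc, v) => acc + v * v, 0) / row.length;
  const inv = 1 / Math.sqrt(meanSq + eps);
  return row.map((v, i) => v * inv * gamma[i]);
}
```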
package/dist/ops/webgpu/normRMSGrad.js
@@ -1,14 +1,12 @@
- import { e as _ } from "../../index-D6Q1lPZO.js";
- import { createReduceInfo as D } from "./utils/reductions.js";
- import { f as X } from "../../webgpu_util-0_ubCEHJ.js";
- import { e as $ } from "../../webgpu_program-DzaQiqel.js";
- import { j as z } from "../../tensor-CzmOBsdf.js";
- import { p as k, u as M } from "../../pack16-CmVZs6af.js";
+ import { p as _, ab as y, e as D } from "../../index-DOvlwCh-.js";
+ import { createReduceInfo as X } from "./utils/reductions.js";
+ import { f as $ } from "../../webgpu_util-RxEF33Rj.js";
+ import { e as M } from "../../webgpu_program-DuOXPQol.js";
+ import { p as k, u as R } from "../../pack16-nQ6JaLo-.js";
  import { isPackedTensor as h } from "../../utilities/packed.js";
- import { reshape16 as R } from "../reshape16.js";
- import { sum16 as L } from "../sum16.js";
- import { slice16 as w } from "../slice16.js";
- import { r as P } from "../../tensor_util-DfwaWayG.js";
+ import { reshape16 as L } from "../reshape16.js";
+ import { sum16 as P } from "../sum16.js";
+ import { slice16 as z } from "../slice16.js";
  class N {
  outputShape;
  shaderKey = "RMSNormGrad";
@@ -23,7 +21,7 @@ class N {
  packed = !1;
  outputComponent;
  constructor(a, e = 4, o = !1) {
- if (this.packed = o, this.shaderKey = `RMSNormGrad_${e}`, this.rowsPerWorkgroup = e, this.inputShape = [a.batchSize, a.inSize], this.outputShape = [a.batchSize + a.batchSize / this.rowsPerWorkgroup, a.inSize], this.dispatchLayout = X(this.outputShape), this.dispatch = [a.batchSize / this.rowsPerWorkgroup, 1, 1], a.batchSize % this.rowsPerWorkgroup !== 0)
+ if (this.packed = o, this.shaderKey = `RMSNormGrad_${e}`, this.rowsPerWorkgroup = e, this.inputShape = [a.batchSize, a.inSize], this.outputShape = [a.batchSize + a.batchSize / this.rowsPerWorkgroup, a.inSize], this.dispatchLayout = $(this.outputShape), this.dispatch = [a.batchSize / this.rowsPerWorkgroup, 1, 1], a.batchSize % this.rowsPerWorkgroup !== 0)
  throw new Error(
  `RMSNormGradProgram: batch size ${a.batchSize} must be divisible by rowsPerWorkgroup ${this.rowsPerWorkgroup}`
  );
@@ -87,7 +85,7 @@ class N {

  ${o}

- ${$("index")} {
+ ${M("index")} {
  // One workgroup per row (batch).
  let Length = uniforms.reduceSize;
  let BatchSize = uniforms.batchSize;
@@ -145,10 +143,10 @@
  }
  function W(p) {
  const { dy: a, x: e, gamma: o } = p.inputs, n = 4;
- z(e.shape, a.shape, "Error in RMSNormGrad dy: ");
+ y(e.shape, a.shape, "Error in RMSNormGrad dy: ");
  const s = h(e), i = h(o), u = h(a), r = s || i || u, m = !r || s ? e : k(e), c = !r || i ? o : k(o), d = !r || u ? a : k(a);
- z(c.shape, [m.shape[m.shape.length - 1]], "Error in RMSNormGrad gamma: ");
- const G = p.backend, t = D([m, c, d], -1), f = new N(t, n, r), v = [
+ y(c.shape, [m.shape[m.shape.length - 1]], "Error in RMSNormGrad gamma: ");
+ const w = p.backend, t = X([m, c, d], -1), f = new N(t, n, r), G = [
  { type: "int32", data: [f.inputShape[1]] },
  // Reduce size
  { type: "int32", data: [f.inputShape[0]] }
@@ -156,27 +154,22 @@ function W(p) {
  ];
  if (t.inSize > 1024)
  throw new Error(`rmsNormGradGPU: inSize ${t.inSize} exceeds max of 1024`);
- const x = G.runWebGPUProgram(
- f,
- [m, c, d],
- r ? "int32" : "float32",
- v
- );
- x.packed = r, r && !s && m.dispose(), r && !i && c.dispose(), r && !u && d.dispose();
- const l = _().makeTensorFromTensorInfo(x), S = w(l, [0, 0], [t.batchSize, t.inSize]), g = w(
+ const v = w.runWebGPUProgram(f, [m, c, d], r ? "packedF16" : "float32", G);
+ r && !s && m.dispose(), r && !i && c.dispose(), r && !u && d.dispose();
+ const l = D().makeTensorFromTensorInfo(v), x = z(l, [0, 0], [t.batchSize, t.inSize]), S = z(
  l,
  [t.batchSize, 0],
  [t.batchSize / n, t.inSize]
  );
  l.dispose();
- const b = R(S, e.shape);
- S.dispose();
- const y = L(g, [0]);
- return g.dispose(), [b, !r || i ? y : M(y)];
+ const b = L(x, e.shape);
+ x.dispose();
+ const g = P(S, [0]);
+ return S.dispose(), [b, !r || i ? g : R(g)];
  }
  const Y = {
  kernelName: "RMSNormGrad",
  backendName: "webgpu",
  kernelFunc: W
  };
- P(Y);
+ _(Y);
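The host-side bookkeeping above is worth decoding: the program writes a single [batchSize + batchSize/rowsPerWorkgroup, inSize] tensor holding dx in the first batchSize rows and per-workgroup partial gamma gradients below them; the kernel then slices the two regions apart and sums the partials over axis 0 to finish dgamma. A sketch of that layout logic with descriptive names in place of the minified ones (`engine()` stands for the minified `D()` import):

```js
// Output layout produced by RMSNormGradProgram (rowsPerWorkgroup = 4 above):
//   rows [0, batchSize)                       -> dx
//   rows [batchSize, batchSize + batchSize/4) -> partial dgamma per workgroup
const full = engine().makeTensorFromTensorInfo(programOutput);
const dx = slice16(full, [0, 0], [batchSize, inSize]);
const dGammaPartials = slice16(
  full, [batchSize, 0], [batchSize / rowsPerWorkgroup, inSize]);
full.dispose();
const dxShaped = reshape16(dx, xShape);     // restore the original rank-3 shape
const dGamma = sum16(dGammaPartials, [0]);  // reduce partials to one gamma grad
```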
package/dist/ops/webgpu/pack16.js
@@ -1,19 +1,18 @@
- import "../../index-D6Q1lPZO.js";
- import c from "./pack16_program.js";
- import { r as p } from "../../tensor_util-DfwaWayG.js";
- function m(n) {
- const { x: e } = n.inputs, { scaling: t, padding: r } = n.attrs, i = n.backend;
- if (e.shape[e.shape.length - 1] % 2 !== 0)
+ import { p as i } from "../../index-DOvlwCh-.js";
+ import p from "./pack16_program.js";
+ function m(e) {
+ const { x: n } = e.inputs, { scaling: a, padding: r } = e.attrs, s = e.backend;
+ if (n.shape[n.shape.length - 1] % 2 !== 0)
  throw new Error("Last dimension of input tensor must be even to use Pack16.");
- n.attrs && (n.attrs.originalShape = e.shape);
- const a = new c(e.shape, r), o = t !== 1;
- o && a.useScaling();
- const s = [{ type: "float32", data: [t] }];
- return i.runWebGPUProgram(a, [e], "int32", o ? s : void 0);
+ e.attrs && (e.attrs.originalShape = n.shape);
+ const t = new p(n.shape, r), o = a !== 1;
+ o && t.useScaling();
+ const c = [{ type: "float32", data: [a] }];
+ return s.runWebGPUProgram(t, [n], "packedF16", o ? c : void 0);
  }
- const u = {
+ const k = {
  kernelName: "Pack16",
  backendName: "webgpu",
  kernelFunc: m
  };
- p(u);
+ i(k);
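"packedF16" storage puts two half-precision values into each 32-bit slot, which is why Pack16 rejects tensors whose last dimension is odd and why packed shapes carry half-width last dimensions elsewhere in this diff (the `* 2` factors in matMul16, the `t / 2` in the packed Rope program). A CPU sketch of the conversion, with rounding and NaN handling simplified and the lane order an assumption:

```js
// Convert an f32 to f16 bits (truncating; real kernels typically round).
function f32ToF16Bits(value) {
  const f32 = new Float32Array(1);
  const u32 = new Uint32Array(f32.buffer);
  f32[0] = value;
  const x = u32[0];
  const sign = (x >>> 16) & 0x8000;
  const exp = ((x >>> 23) & 0xff) - 127 + 15; // rebias exponent 8 -> 5 bits
  const mant = (x >>> 13) & 0x3ff;            // keep top 10 mantissa bits
  if (exp <= 0) return sign;                  // flush subnormals to signed zero
  if (exp >= 31) return sign | 0x7c00;        // overflow to infinity
  return sign | (exp << 10) | mant;
}

// Two logical values -> one stored 32-bit element (low half first, assumed).
function packPair(a, b) {
  return ((f32ToF16Bits(b) << 16) | f32ToF16Bits(a)) >>> 0;
}
```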
package/dist/ops/webgpu/pack16_program.js
@@ -1,5 +1,5 @@
- import { f as o, c as a } from "../../webgpu_util-0_ubCEHJ.js";
- import { e as s } from "../../webgpu_program-DzaQiqel.js";
+ import { f as o, c as a } from "../../webgpu_util-RxEF33Rj.js";
+ import { e as s } from "../../webgpu_program-DuOXPQol.js";
  class h {
  outputShape;
  shaderKey = "Pack16";
package/dist/ops/webgpu/qkv.js
@@ -1,26 +1,24 @@
- import "../../index-D6Q1lPZO.js";
- import { j as h } from "../../tensor-CzmOBsdf.js";
- import { b as f } from "../../matMul16-fEAJ4smh.js";
+ import { p as h, ab as l } from "../../index-DOvlwCh-.js";
+ import { b as f } from "../../matMul16-BWRSOCWB.js";
  import { slice16 as a } from "../slice16.js";
- import { isPackedTensor as l } from "../../utilities/packed.js";
- import { r as u } from "../../tensor_util-DfwaWayG.js";
- function k(i) {
- const { x: r, kernel: c } = i.inputs, { heads: e } = i.attrs, t = r.shape[0], n = r.shape[1], s = r.shape[2], m = l(r);
- if (h(c.shape, [m ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
+ import { isPackedTensor as u } from "../../utilities/packed.js";
+ function b(i) {
+ const { x: n, kernel: c } = i.inputs, { heads: e } = i.attrs, r = n.shape[0], t = n.shape[1], s = n.shape[2], p = u(n);
+ if (l(c.shape, [p ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
  throw new Error(`Channel dimension ${s} must be divisible by number of heads ${e} in QKV.`);
- const o = f(r, c, !1, !1, {
- forceOutputShape: [t, n, 3 * e, s / e],
+ const o = f(n, c, !1, !1, {
+ forceOutputShape: [r, t, 3 * e, s / e],
  perm: [0, 2, 1, 3]
- }), p = [
- a(o, [0, 0, 0, 0], [t, e, n, s / e]),
- a(o, [0, e, 0, 0], [t, e, n, s / e]),
- a(o, [0, 2 * e, 0, 0], [t, e, n, s / e])
+ }), m = [
+ a(o, [0, 0, 0, 0], [r, e, t, s / e]),
+ a(o, [0, e, 0, 0], [r, e, t, s / e]),
+ a(o, [0, 2 * e, 0, 0], [r, e, t, s / e])
  ];
- return o.dispose(), p;
+ return o.dispose(), m;
  }
- const b = {
+ const k = {
  kernelName: "QKV",
  backendName: "webgpu",
- kernelFunc: k
+ kernelFunc: b
  };
- u(b);
+ h(k);
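What this kernel does, spelled out: a single fused projection produces Q, K, and V in one matmul whose output is forced to [batch, seq, 3*heads, headDim] and transposed to head-major order, after which three slices along the head axis yield the per-head Q, K, V tensors. A de-minified sketch with descriptive names substituted:

```js
// x: [batch, seq, channels]; kernel: [channels, 3 * channels]
// (doubled first dim when packed); channels = heads * headDim.
const headDim = channels / heads;
const fused = matMul16(x, kernel, false, false, {
  forceOutputShape: [batch, seq, 3 * heads, headDim],
  perm: [0, 2, 1, 3], // -> [batch, 3 * heads, seq, headDim]
});
const q = slice16(fused, [0, 0, 0, 0],         [batch, heads, seq, headDim]);
const k = slice16(fused, [0, heads, 0, 0],     [batch, heads, seq, headDim]);
const v = slice16(fused, [0, 2 * heads, 0, 0], [batch, heads, seq, headDim]);
fused.dispose();
```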
package/dist/ops/webgpu/rope.js
@@ -1,10 +1,8 @@
- import { isPackedTensor as w } from "../../utilities/packed.js";
- import { e as x } from "../../webgpu_program-DzaQiqel.js";
- import { f as l, c as m } from "../../webgpu_util-0_ubCEHJ.js";
- import "../../index-D6Q1lPZO.js";
- import { j as b } from "../../tensor-CzmOBsdf.js";
- import { r as v } from "../../tensor_util-DfwaWayG.js";
- class k {
+ import { isPackedTensor as y } from "../../utilities/packed.js";
+ import { e as c } from "../../webgpu_program-DuOXPQol.js";
+ import { f as x, c as l } from "../../webgpu_util-RxEF33Rj.js";
+ import { p as w, ab as b } from "../../index-DOvlwCh-.js";
+ class v {
  variableNames = ["x", "sin", "cos"];
  outputShape;
  shaderKey = "Rope";
@@ -13,13 +11,13 @@ class k {
  workgroupSize = [64, 1, 1];
  size = !0;
  uniforms = "pastLen: i32";
- constructor(e, s, r, t) {
- this.shaderKey = `Rope_${t}`, this.outputShape = [e, s, r, t], this.dispatchLayout = l(this.outputShape), this.dispatch = m(this.dispatchLayout, this.outputShape, this.workgroupSize);
+ constructor(e, o, a, t) {
+ this.shaderKey = `Rope_${t}`, this.outputShape = [e, o, a, t], this.dispatchLayout = x(this.outputShape), this.dispatch = l(this.dispatchLayout, this.outputShape, this.workgroupSize);
  }
  getUserCode() {
  const e = this.outputShape[3];
  return `
- ${x("index")} {
+ ${c("index")} {
  if (index < uniforms.size) {
  let coords = getCoordsFromIndex(index); // [b, h, t, d]
  let b = coords[0];
@@ -64,7 +62,7 @@ class k {
  `;
  }
  }
- class L {
+ class k {
  variableNames = ["x", "sin", "cos"];
  outputShape;
  shaderKey = "Rope";
@@ -73,12 +71,12 @@ class L {
  workgroupSize = [64, 1, 1];
  size = !0;
  uniforms = "pastLen: i32";
- constructor(e, s, r, t) {
- this.shaderKey = `Rope_${t}`, this.outputShape = [e, s, r, t / 2], this.dispatchLayout = l(this.outputShape), this.dispatch = m(this.dispatchLayout, this.outputShape, this.workgroupSize);
+ constructor(e, o, a, t) {
+ this.shaderKey = `Rope_${t}`, this.outputShape = [e, o, a, t / 2], this.dispatchLayout = x(this.outputShape), this.dispatch = l(this.dispatchLayout, this.outputShape, this.workgroupSize);
  }
  getUserCode() {
  return `
- ${x("index")} {
+ ${c("index")} {
  if (index < uniforms.size) {
  let coords = getCoordsFromIndex(index); // [b, h, t, d]
  let b = coords[0];
@@ -116,22 +114,22 @@ class L {
  `;
  }
  }
- function P(i) {
- const { x: e } = i.inputs, { pastLen: s, negSin: r, ropeCache: t } = i.attrs, f = i.backend, a = w(e), p = e.shape[0], h = e.shape[1], n = e.shape[2], d = a ? e.shape[3] * 2 : e.shape[3], o = r ? t.getNegSin() : t.getSin(), u = t.getCos();
- if (b(o.shape, u.shape, "Error in Rope: "), o.shape[0] < n + s)
+ function L(i) {
+ const { x: e } = i.inputs, { pastLen: o, negSin: a, ropeCache: t } = i.attrs, m = i.backend, d = y(e), p = e.shape[0], h = e.shape[1], r = e.shape[2], n = d ? e.shape[3] * 2 : e.shape[3], s = a ? t.getNegSin() : t.getSin(), u = t.getCos();
+ if (b(s.shape, u.shape, "Error in Rope: "), s.shape[0] < r + o)
  throw new Error(
- `Sin tensor shape ${o.shape} is not compatible with seqLength ${n} and pastLen ${s}.`
+ `Sin tensor shape ${s.shape} is not compatible with seqLength ${r} and pastLen ${o}.`
  );
- if (o.shape[1] * 2 < d)
- throw new Error(`Sin tensor shape ${o.shape} is not compatible with feature dimension ${d}.`);
- if (o.shape.length !== 3)
- throw new Error(`Sin tensor must be 3-dimensional, but got shape ${o.shape}.`);
- const S = a ? new L(p, h, n, d) : new k(p, h, n, d), g = [{ type: "int32", data: [s] }], y = a ? "int32" : e.dtype, c = f.runWebGPUProgram(S, [e, o, u], y, g);
- return c.packed = a, c;
+ if (s.shape[1] * 2 < n)
+ throw new Error(`Sin tensor shape ${s.shape} is not compatible with feature dimension ${n}.`);
+ if (s.shape.length !== 3)
+ throw new Error(`Sin tensor must be 3-dimensional, but got shape ${s.shape}.`);
+ const f = d ? new k(p, h, r, n) : new v(p, h, r, n), S = [{ type: "int32", data: [o] }], g = d ? "packedF16" : e.dtype;
+ return m.runWebGPUProgram(f, [e, s, u], g, S);
  }
- const $ = {
+ const P = {
  kernelName: "Rope",
  backendName: "webgpu",
- kernelFunc: P
+ kernelFunc: L
  };
- v($);
+ w(P);
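For context, the Rope kernel applies rotary position embeddings using precomputed sin/cos tables from a RoPECache; the negSin attribute selects the inverse rotation, and pastLen offsets into the tables during incremental decoding. The WGSL body is elided from this diff; the standard rotary rotation, some variant of which this shader computes per feature pair, looks like this (a hedged sketch, not the package's shader; the pairing convention is an assumption):

```js
// x: [batch, heads, seq, dim]; sinVal/cosVal come from the cache tables,
// indexed by absolute position (pos + pastLen) and feature-pair index.
function ropeRotatePair([x0, x1], sinVal, cosVal) {
  return [x0 * cosVal - x1 * sinVal, x1 * cosVal + x0 * sinVal];
}
```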
package/dist/ops/webgpu/scatterSub.js
@@ -1,8 +1,6 @@
- import { e as p } from "../../webgpu_program-DzaQiqel.js";
- import { f as u, c as d } from "../../webgpu_util-0_ubCEHJ.js";
- import "../../index-D6Q1lPZO.js";
- import { j as s } from "../../tensor-CzmOBsdf.js";
- import { r as h } from "../../tensor_util-DfwaWayG.js";
+ import { e as p } from "../../webgpu_program-DuOXPQol.js";
+ import { f as u, c as d } from "../../webgpu_util-RxEF33Rj.js";
+ import { p as h, ab as o } from "../../index-DOvlwCh-.js";
  class b {
  variableNames = ["labels", "softmaxProbs", "dy"];
  outputShape;
@@ -28,11 +26,11 @@ class b {
  `;
  }
  }
- function f(o) {
- const { logits: t, labels: e, dy: a } = o.inputs, c = o.backend, r = e.shape[0], i = t.shape[1];
- s(a.shape, [r], "Error in EfficientScatterSub dy: "), s(t.shape, [r, i], "Error in EfficientScatterSub logits: "), s(e.shape, [r], "Error in EfficientScatterSub labels: ");
+ function f(a) {
+ const { logits: t, labels: e, dy: s } = a.inputs, c = a.backend, r = e.shape[0], i = t.shape[1];
+ o(s.shape, [r], "Error in EfficientScatterSub dy: "), o(t.shape, [r, i], "Error in EfficientScatterSub logits: "), o(e.shape, [r], "Error in EfficientScatterSub labels: ");
  const n = new b(r, i);
- return c.runWebGPUProgram(n, [e, t, a], "float32");
+ return c.runWebGPUProgram(n, [e, t, s], "float32");
  }
  const l = {
  kernelName: "EfficientScatterSub",