@genai-fi/nanogpt 0.10.2 → 0.10.3

This diff compares the contents of publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Files changed (249)
  1. package/dist/Generator.js +11761 -171
  2. package/dist/{RealDiv-zz7FpkKX.js → RealDiv-KAPDe8zB.js} +23 -25
  3. package/dist/Reshape-BYkmUnAv.js +14 -0
  4. package/dist/{Reshape-CHdUjC72.js → Reshape-Zt6eb7yh.js} +18 -20
  5. package/dist/TeachableLLM.js +10 -11
  6. package/dist/{axis_util-BsIr9ZNu.js → axis_util-BaG7mf5A.js} +3 -3
  7. package/dist/backend.js +2 -2
  8. package/dist/{backend_util-B1XRLuq9.js → backend_util-RCe-rHaj.js} +72 -73
  9. package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-DE3ACOLx.js} +45 -47
  10. package/dist/broadcast_to-B3eYlZm7.js +28 -0
  11. package/dist/checks/appendCache.js +2 -2
  12. package/dist/checks/attentionMask.js +3 -3
  13. package/dist/checks/gelu.js +2 -2
  14. package/dist/checks/matMulGelu.js +7 -11
  15. package/dist/checks/normRMS.js +9 -9
  16. package/dist/checks/normRMSGrad.js +3 -3
  17. package/dist/checks/packUnpack.js +2 -2
  18. package/dist/checks/qkv.js +12 -13
  19. package/dist/checks/rope.js +2 -2
  20. package/dist/clip_by_value-BnO7-a88.js +12 -0
  21. package/dist/complex-DjxcVmoX.js +11 -0
  22. package/dist/concat-BV8bt5H-.js +17 -0
  23. package/dist/{concat_util-iBYIyuQe.js → concat_util-DpW8mL_l.js} +1 -1
  24. package/dist/{dataset-D2P7rHAw.js → dataset-BcwmTGYc.js} +137 -139
  25. package/dist/dropout-BcvN9JYi.js +92 -0
  26. package/dist/expand_dims-DT4tEPwA.js +11 -0
  27. package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-Hta_rEnm.js} +1 -1
  28. package/dist/floor-D5QdR_le.js +9 -0
  29. package/dist/gather-D3JcZUaI.js +9 -0
  30. package/dist/{gelu-Bmhopi0J.js → gelu-CjNPL4OH.js} +10 -11
  31. package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-DAOmgtXR.js} +841 -1015
  32. package/dist/{index-DRyE072i.js → index-BwexR4lA.js} +262 -263
  33. package/dist/index-DOvlwCh-.js +3520 -0
  34. package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CCzYdUZg.js} +130 -132
  35. package/dist/layers/BaseLayer.js +15 -16
  36. package/dist/layers/CausalSelfAttention.js +6 -6
  37. package/dist/layers/MLP.js +4 -4
  38. package/dist/layers/PositionEmbedding.js +7 -7
  39. package/dist/layers/RMSNorm.js +3 -3
  40. package/dist/layers/RoPECache.js +9 -9
  41. package/dist/layers/TiedEmbedding.js +6 -6
  42. package/dist/layers/TransformerBlock.js +1 -1
  43. package/dist/loader/loadTransformers.js +1 -1
  44. package/dist/loader/oldZipLoad.js +13 -14
  45. package/dist/log_sum_exp-ngO0-4pK.js +39 -0
  46. package/dist/main.js +49 -50
  47. package/dist/{matMul16-fEAJ4smh.js → matMul16-BWRSOCWB.js} +14 -15
  48. package/dist/matMulGelu-CzfgT6Wq.js +163 -0
  49. package/dist/mat_mul-SjpJRLyL.js +11 -0
  50. package/dist/mod-AnXEvvpo.js +11 -0
  51. package/dist/models/NanoGPTV1.js +2 -2
  52. package/dist/models/model.js +13 -14
  53. package/dist/ones-D2rT0xk2.js +14 -0
  54. package/dist/ops/adamAdjust.js +1 -1
  55. package/dist/ops/adamMoments.js +1 -1
  56. package/dist/ops/add16.js +1 -1
  57. package/dist/ops/appendCache.js +3 -3
  58. package/dist/ops/attentionMask.js +1 -1
  59. package/dist/ops/concat16.js +2 -2
  60. package/dist/ops/cpu/adamAdjust.js +13 -14
  61. package/dist/ops/cpu/adamMoments.js +6 -7
  62. package/dist/ops/cpu/appendCache.js +7 -8
  63. package/dist/ops/cpu/attentionMask.js +7 -7
  64. package/dist/ops/cpu/fusedSoftmax.js +10 -11
  65. package/dist/ops/cpu/gatherSub.js +9 -10
  66. package/dist/ops/cpu/gelu.js +9 -10
  67. package/dist/ops/cpu/matMul16.js +6 -7
  68. package/dist/ops/cpu/matMulGelu.js +5 -6
  69. package/dist/ops/cpu/matMulMul.js +3 -4
  70. package/dist/ops/cpu/mulDropout.js +3 -4
  71. package/dist/ops/cpu/normRMS.js +10 -11
  72. package/dist/ops/cpu/qkv.js +8 -9
  73. package/dist/ops/cpu/rope.js +5 -6
  74. package/dist/ops/cpu/scatterSub.js +17 -19
  75. package/dist/ops/dot16.js +2 -2
  76. package/dist/ops/gatherSub.js +1 -1
  77. package/dist/ops/gelu.js +2 -2
  78. package/dist/ops/grads/add16.js +11 -12
  79. package/dist/ops/grads/attentionMask.js +5 -6
  80. package/dist/ops/grads/gelu.js +3 -4
  81. package/dist/ops/grads/matMul16.js +4 -5
  82. package/dist/ops/grads/matMulGelu.js +9 -10
  83. package/dist/ops/grads/normRMS.js +7 -8
  84. package/dist/ops/grads/pack16.js +4 -5
  85. package/dist/ops/grads/qkv.js +17 -19
  86. package/dist/ops/grads/rope.js +3 -5
  87. package/dist/ops/grads/softmax16.js +3 -4
  88. package/dist/ops/grads/unpack16.js +3 -4
  89. package/dist/ops/grads/utils.d.ts +1 -0
  90. package/dist/ops/grads/utils.js +8 -4
  91. package/dist/ops/matMul16.js +3 -3
  92. package/dist/ops/matMulGelu.js +2 -2
  93. package/dist/ops/matMulMul.js +1 -1
  94. package/dist/ops/mul16.js +1 -1
  95. package/dist/ops/mulDrop.js +1 -1
  96. package/dist/ops/normRMS.js +1 -1
  97. package/dist/ops/pack16.js +3 -4
  98. package/dist/ops/qkv.js +4 -8
  99. package/dist/ops/reshape16.js +14 -16
  100. package/dist/ops/rope.d.ts +1 -1
  101. package/dist/ops/rope.js +3 -8
  102. package/dist/ops/scatterSub.js +1 -1
  103. package/dist/ops/slice16.js +2 -2
  104. package/dist/ops/softmax16.js +5 -8
  105. package/dist/ops/sub16.js +1 -1
  106. package/dist/ops/sum16.js +2 -2
  107. package/dist/ops/transpose16.js +23 -24
  108. package/dist/ops/unpack16.js +2 -2
  109. package/dist/ops/webgl/adamAdjust.js +2 -3
  110. package/dist/ops/webgl/adamMoments.js +1 -2
  111. package/dist/ops/webgl/appendCache.js +1 -2
  112. package/dist/ops/webgl/attentionMask.js +4 -5
  113. package/dist/ops/webgl/fusedSoftmax.js +4 -6
  114. package/dist/ops/webgl/gatherSub.js +6 -7
  115. package/dist/ops/webgl/gelu.js +2 -3
  116. package/dist/ops/webgl/log.js +11 -12
  117. package/dist/ops/webgl/matMul16.js +10 -11
  118. package/dist/ops/webgl/matMulGelu.js +7 -111
  119. package/dist/ops/webgl/matMulMul.js +9 -10
  120. package/dist/ops/webgl/mulDropout.js +8 -9
  121. package/dist/ops/webgl/normRMS.js +2 -3
  122. package/dist/ops/webgl/qkv.js +5 -6
  123. package/dist/ops/webgl/rope.js +7 -8
  124. package/dist/ops/webgl/scatterSub.js +5 -6
  125. package/dist/ops/webgpu/adamAdjust.js +10 -12
  126. package/dist/ops/webgpu/adamMoments.js +8 -10
  127. package/dist/ops/webgpu/add16.js +8 -9
  128. package/dist/ops/webgpu/appendCache.js +23 -25
  129. package/dist/ops/webgpu/attentionMask.js +8 -10
  130. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  131. package/dist/ops/webgpu/concat16.js +12 -14
  132. package/dist/ops/webgpu/gatherSub.js +11 -13
  133. package/dist/ops/webgpu/gelu.js +28 -29
  134. package/dist/ops/webgpu/matMul16.js +26 -28
  135. package/dist/ops/webgpu/matMul16_program.js +4 -5
  136. package/dist/ops/webgpu/mul16.js +9 -10
  137. package/dist/ops/webgpu/normRMS.js +15 -17
  138. package/dist/ops/webgpu/normRMSGrad.js +21 -28
  139. package/dist/ops/webgpu/pack16.js +12 -13
  140. package/dist/ops/webgpu/pack16_program.js +2 -2
  141. package/dist/ops/webgpu/qkv.js +16 -18
  142. package/dist/ops/webgpu/rope.js +25 -27
  143. package/dist/ops/webgpu/scatterSub.js +7 -9
  144. package/dist/ops/webgpu/slice16.js +21 -23
  145. package/dist/ops/webgpu/softmax16.js +17 -19
  146. package/dist/ops/webgpu/softmax16_program.js +2 -2
  147. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  148. package/dist/ops/webgpu/softmax16grad.js +7 -8
  149. package/dist/ops/webgpu/sub16.js +7 -8
  150. package/dist/ops/webgpu/sum16.js +18 -20
  151. package/dist/ops/webgpu/transpose16.js +19 -20
  152. package/dist/ops/webgpu/transpose16_program.js +2 -2
  153. package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
  154. package/dist/ops/webgpu/unpack16.js +3 -4
  155. package/dist/ops/webgpu/utils/binary_op.js +7 -8
  156. package/dist/ops/webgpu/utils/reductions.js +14 -22
  157. package/dist/ops-B5yanEdW.js +476 -0
  158. package/dist/pack16-nQ6JaLo-.js +39 -0
  159. package/dist/patches/webgpu_backend.js +19 -20
  160. package/dist/patches/webgpu_base.js +1 -1
  161. package/dist/patches/webgpu_program.js +21 -22
  162. package/dist/{random_width-BVV9HveY.js → random_width-or-CEftb.js} +2506 -2761
  163. package/dist/range-BklejeeW.js +10 -0
  164. package/dist/relu-CP0ZcxWO.js +9 -0
  165. package/dist/reshape-ByE68wS9.js +9 -0
  166. package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
  167. package/dist/rope-Ir4mTyD1.js +24 -0
  168. package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-lvSiX8q4.js} +1 -1
  169. package/dist/selu_util-kbhpTdYD.js +44 -0
  170. package/dist/{shared-CHhxz-O5.js → shared-DT1TkE6w.js} +1 -1
  171. package/dist/{shared-D2NP_CpY.js → shared-dntlHIDQ.js} +343 -345
  172. package/dist/slice-BfEGSH82.js +12 -0
  173. package/dist/{slice_util-DyjSAD0u.js → slice_util-uTKwiEpW.js} +1 -1
  174. package/dist/{softmax-C9JQEtnO.js → softmax-CA5jFsLR.js} +4 -5
  175. package/dist/split-CVLc0w--.js +9 -0
  176. package/dist/squeeze-C7Z2srUo.js +10 -0
  177. package/dist/stack-Cf4n9h0N.js +11 -0
  178. package/dist/step-CINUs5QB.js +261 -0
  179. package/dist/sum-DWAtNGez.js +11 -0
  180. package/dist/tensor-DJoc7gJU.js +8 -0
  181. package/dist/tensor1d-D11P_7Dp.js +11 -0
  182. package/dist/{tensor2d-CSB4KOb0.js → tensor2d-Bs9wZRc7.js} +6 -7
  183. package/dist/{tensor4d-D7bLqGqz.js → tensor4d-BARPdTaS.js} +6 -7
  184. package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-y1cvNhLA.js} +255 -264
  185. package/dist/tile-mbfagpsB.js +11 -0
  186. package/dist/training/Adam.js +2 -2
  187. package/dist/training/AdamExt.js +1 -1
  188. package/dist/training/DatasetBuilder.js +2 -2
  189. package/dist/training/FullTrainer.js +1 -1
  190. package/dist/training/Trainer.js +2 -2
  191. package/dist/training/sparseCrossEntropy.js +5 -5
  192. package/dist/transpose-ClWiBS_b.js +36 -0
  193. package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
  194. package/dist/utilities/dummy.js +3 -3
  195. package/dist/utilities/multinomialCPU.js +2 -2
  196. package/dist/utilities/packed.d.ts +1 -4
  197. package/dist/utilities/packed.js +10 -745
  198. package/dist/utilities/performance.js +1 -1
  199. package/dist/utilities/profile.js +1 -1
  200. package/dist/utilities/safetensors.js +2 -2
  201. package/dist/utilities/sentences.js +5 -5
  202. package/dist/utilities/weights.js +2 -2
  203. package/dist/{variable-DzfrwYuP.js → variable-WawDEaAb.js} +1 -1
  204. package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-DuOXPQol.js} +178 -172
  205. package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-RxEF33Rj.js} +34 -35
  206. package/dist/zeros-KnWaWf-X.js +13 -0
  207. package/dist/zeros_like-DvE73F4e.js +721 -0
  208. package/package.json +4 -2
  209. package/dist/Reshape-CDVLyVfz.js +0 -16
  210. package/dist/broadcast_to-B0ChcDaz.js +0 -30
  211. package/dist/complex-BBiRlsVq.js +0 -13
  212. package/dist/concat-DmBLPVGC.js +0 -19
  213. package/dist/dropout-B1x1kYMa.js +0 -99
  214. package/dist/expand_dims-ouvfxQ1n.js +0 -13
  215. package/dist/gather-CH9sdacz.js +0 -10
  216. package/dist/index-D6Q1lPZO.js +0 -2157
  217. package/dist/log_sum_exp-D3ftBNY5.js +0 -41
  218. package/dist/mat_mul-C59XWcJd.js +0 -12
  219. package/dist/mod-DESSvHIU.js +0 -12
  220. package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
  221. package/dist/ones-jU9jlQvM.js +0 -15
  222. package/dist/ops-BFDtP6th.js +0 -645
  223. package/dist/pack16-CmVZs6af.js +0 -41
  224. package/dist/patches/PackedTensor.d.ts +0 -12
  225. package/dist/patches/PackedTensor.js +0 -11
  226. package/dist/patches/engine.d.ts +0 -261
  227. package/dist/patches/engine.js +0 -12
  228. package/dist/patches/tape.d.ts +0 -12
  229. package/dist/patches/tape.js +0 -5
  230. package/dist/range-ZZZD60Fx.js +0 -11
  231. package/dist/reciprocal-CrYlsAGD.js +0 -10
  232. package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
  233. package/dist/relu-BYDneVPn.js +0 -10
  234. package/dist/reshape-CaPQzFvz.js +0 -10
  235. package/dist/rope-s4W2XO9B.js +0 -32
  236. package/dist/selu_util-BGPXmd4B.js +0 -303
  237. package/dist/sin-Djs4aQiu.js +0 -16
  238. package/dist/slice-DvovR5wq.js +0 -13
  239. package/dist/split-DBck65sX.js +0 -10
  240. package/dist/squeeze-C00Ipm_7.js +0 -11
  241. package/dist/stack-ChnHwRpX.js +0 -13
  242. package/dist/sum-ywRJj3Zr.js +0 -12
  243. package/dist/tensor-0r5yOo2R.js +0 -8
  244. package/dist/tensor-CzmOBsdf.js +0 -909
  245. package/dist/tensor1d-BlUT89BP.js +0 -12
  246. package/dist/tensor_util-DfwaWayG.js +0 -523
  247. package/dist/tile-CR074jmp.js +0 -13
  248. package/dist/transpose-DH4gmHvu.js +0 -38
  249. package/dist/zeros-DBFVbpv5.js +0 -14
package/dist/ops/cpu/adamAdjust.js CHANGED
@@ -1,19 +1,18 @@
- import { l as t, n as r, m as k, o as z } from "../../index-D6Q1lPZO.js";
- import { r as A } from "../../tensor_util-DfwaWayG.js";
- function C(o) {
- const { moments: n, value: i } = o.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = o.attrs, e = n.shape.length, c = new Array(e).fill(0), s = n.shape.slice();
- s[e - 1] = 1;
- const a = c.slice();
- a[e - 1] = 1;
- const p = s.slice(), b = n.slice(c, s).squeeze([e - 1]), M = n.slice(a, p).squeeze([e - 1]), f = t(b, l), g = t(M, m);
- return r(
- k(t(f, r(z(g), u ?? 1e-8)), -d),
- i
+ import { p as k, w as t, x as i, m as w, y as z } from "../../index-DOvlwCh-.js";
+ function A(c) {
+ const { moments: s, value: r } = c.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = c.attrs, e = s.shape.length, a = new Array(e).fill(0), n = s.shape.slice();
+ n[e - 1] = 1;
+ const o = a.slice();
+ o[e - 1] = 1;
+ const p = n.slice(), b = s.slice(a, n).squeeze([e - 1]), M = s.slice(o, p).squeeze([e - 1]), g = t(b, l), f = t(M, m);
+ return i(
+ w(t(g, i(z(f), u ?? 1e-8)), -d),
+ r
  );
  }
- const h = {
+ const C = {
  kernelName: "AdamAdjust",
  backendName: "cpu",
- kernelFunc: C
+ kernelFunc: A
  };
- A(h);
+ k(C);
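
Reading the AdamAdjust kernel through its single-letter aliases (an interpretation of minified code, not package documentation: the imported ops appear to be div, add, mul, and sqrt), it slices the two packed moments apart and applies the usual Adam step. A scalar TypeScript sketch with hypothetical names:

    // Hypothetical readable form of the AdamAdjust CPU kernel above. mHat/vHat
    // are the two moments sliced from the packed tensor (the beta1/beta2 attrs
    // appear to be applied to them first, plausibly as bias-correction divisors).
    function adamAdjust(
      value: number,
      mHat: number,
      vHat: number,
      learningRate: number,
      epsilon = 1e-8 // matches `u ?? 1e-8` in the kernel
    ): number {
      // theta <- theta - lr * mHat / (sqrt(vHat) + eps)
      return value - learningRate * (mHat / (Math.sqrt(vHat) + epsilon));
    }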
package/dist/ops/cpu/adamMoments.js CHANGED
@@ -1,12 +1,11 @@
- import "../../index-D6Q1lPZO.js";
- import { r as p } from "../../tensor_util-DfwaWayG.js";
- import { s as b } from "../../stack-ChnHwRpX.js";
+ import { p } from "../../index-DOvlwCh-.js";
+ import { s as b } from "../../stack-Cf4n9h0N.js";
  function f(t) {
- const { moments: n, gradient: o } = t.inputs, { beta1: c, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
+ const { moments: n, gradient: c } = t.inputs, { beta1: o, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
  s[e - 1] = 1;
- const r = a.slice();
- r[e - 1] = 1;
- const i = s.slice(), l = n.slice(a, s).squeeze([e - 1]), u = n.slice(r, i).squeeze([e - 1]), M = l.mul(c).add(o.mul(1 - c)), d = u.mul(m).add(o.square().mul(1 - m));
+ const i = a.slice();
+ i[e - 1] = 1;
+ const r = s.slice(), l = n.slice(a, s).squeeze([e - 1]), u = n.slice(i, r).squeeze([e - 1]), M = l.mul(o).add(c.mul(1 - o)), d = u.mul(m).add(c.square().mul(1 - m));
  return b([M, d], -1);
  }
  const g = {
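
The moment update itself is the standard Adam pair of exponential moving averages; the kernel stores both moments stacked on the last axis of a single tensor. As a scalar sketch (names are hypothetical):

    // Adam moment update mirrored from the hunk above:
    // M = m1 * beta1 + g * (1 - beta1), d = m2 * beta2 + g^2 * (1 - beta2).
    function adamMoments(
      m1: number, // EMA of gradients (first moment)
      m2: number, // EMA of squared gradients (second moment)
      g: number,  // current gradient
      beta1: number,
      beta2: number
    ): [number, number] {
      return [m1 * beta1 + g * (1 - beta1), m2 * beta2 + g * g * (1 - beta2)];
    }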
package/dist/ops/cpu/appendCache.js CHANGED
@@ -1,13 +1,12 @@
- import "../../index-D6Q1lPZO.js";
- import { r as d } from "../../tensor_util-DfwaWayG.js";
- import { c as h } from "../../concat-DmBLPVGC.js";
+ import { p as d } from "../../index-DOvlwCh-.js";
+ import { c as h } from "../../concat-BV8bt5H-.js";
  function u(p) {
- const { cache: n, item: s } = p.inputs, { maxSize: a, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], r = n.shape[3], e = s.shape[2];
- if (c + e <= a) {
- const m = n.slice([0, 0, 0, 0], [t, o, c, r]), f = n.slice([0, 0, c + e, 0], [t, o, a - c - e, r]), i = e < e ? s.slice([0, 0, 0, 0], [t, o, e, r]) : s, k = h([m, i, f], 2);
- return m.dispose(), f.dispose(), i !== s && i.dispose(), k;
+ const { cache: n, item: s } = p.inputs, { maxSize: i, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], a = n.shape[3], e = s.shape[2];
+ if (c + e <= i) {
+ const f = n.slice([0, 0, 0, 0], [t, o, c, a]), m = n.slice([0, 0, c + e, 0], [t, o, i - c - e, a]), r = e < e ? s.slice([0, 0, 0, 0], [t, o, e, a]) : s, k = h([f, r, m], 2);
+ return f.dispose(), m.dispose(), r !== s && r.dispose(), k;
  }
- const l = n.slice([0, 0, e, 0], [t, o, a - e, r]), C = h([l, s], 2);
+ const l = n.slice([0, 0, e, 0], [t, o, i - e, a]), C = h([l, s], 2);
  return l.dispose(), C;
  }
  const w = {
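
Decoded, this maintains a fixed-capacity KV cache on the sequence axis of a [batch, heads, maxSize, dim] tensor: a new item that still fits is spliced in at pastLen, otherwise the oldest entries are dropped. The same policy on a 1-D array (a sketch; names are hypothetical):

    // Fixed-capacity cache append along one axis, mirroring the two branches above.
    function appendCache<T>(cache: T[], item: T[], maxSize: number, pastLen: number): T[] {
      if (pastLen + item.length <= maxSize) {
        // Fits: splice the item in at pastLen, keeping the preallocated tail.
        return [
          ...cache.slice(0, pastLen),
          ...item,
          ...cache.slice(pastLen + item.length),
        ];
      }
      // Overflow: drop the oldest entries so the result stays at maxSize.
      return [...cache.slice(item.length), ...item];
    }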
package/dist/ops/cpu/attentionMask.js CHANGED
@@ -1,11 +1,11 @@
- import { i as d, b as u } from "../../index-D6Q1lPZO.js";
- import { r as o } from "../../tensor_util-DfwaWayG.js";
- import { l as N, w as b } from "../../ops-BFDtP6th.js";
- import { o as A } from "../../ones-jU9jlQvM.js";
- import { z as I } from "../../zeros-DBFVbpv5.js";
- import { m as g } from "../../mat_mul-C59XWcJd.js";
+ import { p as o, q as d, b as u } from "../../index-DOvlwCh-.js";
+ import { l as N } from "../../ops-B5yanEdW.js";
+ import { o as b } from "../../ones-D2rT0xk2.js";
+ import { z as A } from "../../zeros-KnWaWf-X.js";
+ import { w as I } from "../../resize_nearest_neighbor-B19mCEg2.js";
+ import { m as g } from "../../mat_mul-SjpJRLyL.js";
  function a(n) {
- const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(A([t, t]), -1, 0).cast("bool"), i = I([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = b(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
+ const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(b([t, t]), -1, 0).cast("bool"), i = A([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = I(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
  return k.add(p);
  }
  const w = {
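
Decoded, the kernel scores q against k (q·kᵀ scaled by divisor, typically 1/√headDim) and adds a causal mask that is 0 on and below the diagonal and -∞ above it, so a later softmax zeroes out future positions. On plain arrays, for the square no-cache case (a sketch with hypothetical names):

    // Causal attention scores for one head: scores[i][j] = (q[i] . k[j]) * divisor,
    // with -Infinity wherever key position j lies after query position i.
    function causalScores(q: number[][], k: number[][], divisor: number): number[][] {
      return q.map((qRow, i) =>
        k.map((kRow, j) => {
          const dot = qRow.reduce((acc, v, d) => acc + v * kRow[d], 0);
          return j > i ? Number.NEGATIVE_INFINITY : dot * divisor;
        })
      );
    }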
package/dist/ops/cpu/fusedSoftmax.js CHANGED
@@ -1,30 +1,29 @@
- import "../../index-D6Q1lPZO.js";
- import { r as e } from "../../tensor_util-DfwaWayG.js";
- import { s as m } from "../../softmax-C9JQEtnO.js";
- function o(t) {
- const { inputs: s, attrs: a } = t, { logits: n } = s, { dim: i, dropoutRate: r } = a;
- if (!n)
+ import { p as e } from "../../index-DOvlwCh-.js";
+ import { s as m } from "../../softmax-CA5jFsLR.js";
+ function n(t) {
+ const { inputs: s, attrs: a } = t, { logits: o } = s, { dim: i, dropoutRate: r } = a;
+ if (!o)
  throw new Error("Error in softmax: input logits is null");
- return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), m(n, i);
+ return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), m(o, i);
  }
  const f = {
  kernelName: "FusedSoftmax",
  backendName: "cpu",
- kernelFunc: o
+ kernelFunc: n
  };
  e(f);
  const u = {
  kernelName: "FusedSoftmax",
  backendName: "tensorflow",
- kernelFunc: o
+ kernelFunc: n
  };
  e(u);
  const l = {
  kernelName: "FusedSoftmax",
  backendName: "webgpu",
- kernelFunc: o
+ kernelFunc: n
  };
  e(l);
  export {
- o as softmaxCPU
+ n as softmaxCPU
  };
package/dist/ops/cpu/gatherSub.js CHANGED
@@ -1,15 +1,14 @@
- import { A as u, B as c, E as m, c as g } from "../../index-D6Q1lPZO.js";
- import { k as p, r as h } from "../../tensor_util-DfwaWayG.js";
- import { r as f } from "../../range-ZZZD60Fx.js";
- import { s as l } from "../../stack-ChnHwRpX.js";
- function N(e, t) {
- const n = c(t, "indices", "gatherND", "int32"), r = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
- return m.runKernel(p, r);
+ import { A as u, B as c, E as g, aj as p, p as h, c as m } from "../../index-DOvlwCh-.js";
+ import { r as l } from "../../range-BklejeeW.js";
+ import { s as N } from "../../stack-Cf4n9h0N.js";
+ function f(e, t) {
+ const n = c(t, "indices", "gatherND", "int32"), s = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
+ return g.runKernel(p, s);
  }
- const b = /* @__PURE__ */ u({ gatherND_: N });
+ const b = /* @__PURE__ */ u({ gatherND_: f });
  function d(e) {
- const { values: t, labels: n, logits: s } = e.inputs, r = n.shape[0], o = f(0, r, 1, "int32"), i = l([o, n], 1), a = b(s, i);
- return g(t, a);
+ const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = l(0, s, 1, "int32"), i = N([a, n], 1), o = b(r, i);
+ return m(t, o);
  }
  const k = {
  kernelName: "EfficientGatherSub",
package/dist/ops/cpu/gelu.js CHANGED
@@ -1,8 +1,7 @@
- import { t as d } from "../../index-D6Q1lPZO.js";
- import { r } from "../../tensor_util-DfwaWayG.js";
+ import { p as t, t as d } from "../../index-DOvlwCh-.js";
  const o = 0.7978845608028654, c = 0.044715;
- function m(t) {
- const { inputs: u } = t, { x: n } = u, e = n;
+ function m(r) {
+ const { inputs: u } = r, { x: n } = u, e = n;
  return d(() => {
  const l = e.pow(3), s = e.add(l.mul(c)).mul(o).tanh().add(1).mul(0.5);
  return e.mul(s);
@@ -13,15 +12,15 @@ const N = {
  backendName: "cpu",
  kernelFunc: m
  };
- r(N);
+ t(N);
  const K = {
  kernelName: "Gelu",
  backendName: "tensorflow",
  kernelFunc: m
  };
- r(K);
- function i(t) {
- const { dy: u, x: n } = t.inputs;
+ t(K);
+ function i(r) {
+ const { dy: u, x: n } = r.inputs;
  return d(() => {
  const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
  return u.mul(p);
@@ -32,10 +31,10 @@ const x = {
  backendName: "cpu",
  kernelFunc: i
  };
- r(x);
+ t(x);
  const h = {
  kernelName: "GeluGrad",
  backendName: "tensorflow",
  kernelFunc: i
  };
- r(h);
+ t(h);
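
The constants 0.7978845608028654 (≈ √(2/π)) and 0.044715 identify this as the tanh approximation of GELU. A scalar sketch of the same math the Gelu and GeluGrad kernelFuncs compute on tensors:

    const SQRT_2_OVER_PI = 0.7978845608028654; // the minified constant o
    const C = 0.044715;                        // the minified constant c

    // gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    function gelu(x: number): number {
      return 0.5 * x * (1 + Math.tanh(SQRT_2_OVER_PI * (x + C * x ** 3)));
    }

    // GeluGrad: dy * d/dx gelu(x), matching p = g + G in the hunk above.
    function geluGrad(dy: number, x: number): number {
      const t = Math.tanh(SQRT_2_OVER_PI * (x + C * x ** 3)); // a
      const sech2 = 1 - t * t;                                // f
      const du = 1 + 3 * C * x * x;                           // k
      return dy * (0.5 * (1 + t) + 0.5 * x * sech2 * SQRT_2_OVER_PI * du);
    }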
package/dist/ops/cpu/matMul16.js CHANGED
@@ -1,16 +1,15 @@
  import { isPackedTensor as t } from "../../utilities/packed.js";
- import "../../index-D6Q1lPZO.js";
- import { r as p } from "../../tensor_util-DfwaWayG.js";
- import { m } from "../../mat_mul-C59XWcJd.js";
- function l(r) {
- const { A: e, B: n } = r.inputs, { transposeA: o, transposeB: s } = r.attrs, a = !t(e), c = !t(n);
+ import { p } from "../../index-DOvlwCh-.js";
+ import { m as l } from "../../mat_mul-SjpJRLyL.js";
+ function m(e) {
+ const { A: n, B: r } = e.inputs, { transposeA: o, transposeB: s } = e.attrs, a = !t(n), c = !t(r);
  if (a && c)
- return m(e, n, o, s);
+ return l(n, r, o, s);
  throw new Error("MatMul16 CPU kernel only supports packed tensors currently.");
  }
  const u = {
  kernelName: "MatMul16",
  backendName: "cpu",
- kernelFunc: l
+ kernelFunc: m
  };
  p(u);
package/dist/ops/cpu/matMulGelu.js CHANGED
@@ -1,12 +1,11 @@
- import { t as m } from "../../index-D6Q1lPZO.js";
- import { g as i, d as M } from "../../gelu-Bmhopi0J.js";
- import { r as e } from "../../tensor_util-DfwaWayG.js";
- import { m as k } from "../../mat_mul-C59XWcJd.js";
+ import { p as e, t as m } from "../../index-DOvlwCh-.js";
+ import { g as M, d as i } from "../../gelu-CjNPL4OH.js";
+ import { m as k } from "../../mat_mul-SjpJRLyL.js";
  function c(t) {
  const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
  return m(() => {
  const o = k(a, l);
- return i(o);
+ return M(o);
  });
  }
  const G = {
@@ -30,7 +29,7 @@ e(p);
  function s(t) {
  const { dy: u, x: n, kernel: r } = t.inputs;
  return m(() => {
- const a = k(n, r), l = M(u, a), o = l.matMul(r.transpose()), d = n.transpose().matMul(l);
+ const a = k(n, r), l = i(u, a), o = l.matMul(r.transpose()), d = n.transpose().matMul(l);
  return [o, d];
  });
  }
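
The backward pass recomputes the pre-activation z = x·kernel, pushes dy through the GELU derivative, then applies the standard matmul gradients. Schematically (a sketch with the helpers passed in, since the real ones live in the minified bundles):

    // MatMulGeluGrad shape-level sketch: returns [dX, dKernel].
    function matMulGeluGrad(
      dy: number[][], x: number[][], kernel: number[][],
      matMul: (a: number[][], b: number[][]) => number[][],
      transpose: (a: number[][]) => number[][],
      dGelu: (dy: number[][], z: number[][]) => number[][] // dy scaled by gelu'(z)
    ): [number[][], number[][]] {
      const z = matMul(x, kernel);  // recomputed forward pre-activation
      const dz = dGelu(dy, z);
      return [matMul(dz, transpose(kernel)), matMul(transpose(x), dz)];
    }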
package/dist/ops/cpu/matMulMul.js CHANGED
@@ -1,8 +1,7 @@
- import { t as M } from "../../index-D6Q1lPZO.js";
- import { r as e } from "../../tensor_util-DfwaWayG.js";
+ import { p as e, t as i } from "../../index-DOvlwCh-.js";
  function n(t) {
- const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, i = a, k = c;
- return M(() => m.matMul(i, o, s).mul(k));
+ const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
+ return i(() => m.matMul(k, o, s).mul(M));
  }
  const p = {
  kernelName: "MatMulMul",
package/dist/ops/cpu/mulDropout.js CHANGED
@@ -1,8 +1,7 @@
- import { m as u } from "../../index-D6Q1lPZO.js";
- import { r as e } from "../../tensor_util-DfwaWayG.js";
+ import { p as e, m as t } from "../../index-DOvlwCh-.js";
  function n(o) {
- const { inputs: r } = o, { a: l, b: t } = r;
- return console.warn("Using fallback mulDrop implementation without dropout."), u(l, t);
+ const { inputs: r } = o, { a: l, b: u } = r;
+ return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
  }
  const a = {
  kernelName: "MulDropout",
package/dist/ops/cpu/normRMS.js CHANGED
@@ -1,29 +1,28 @@
- import { t as d } from "../../index-D6Q1lPZO.js";
- import { r as a } from "../../tensor_util-DfwaWayG.js";
+ import { p as o, t as d } from "../../index-DOvlwCh-.js";
  function i(t) {
- const { inputs: e } = t, { x: n, gamma: s } = e, r = n, m = s;
+ const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
  return d(() => {
  const u = r.square().mean(-1, !0).add(1e-8).rsqrt();
- return r.mul(u).mul(m);
+ return r.mul(u).mul(a);
  });
  }
- const k = {
+ const f = {
  kernelName: "RMSNorm",
  backendName: "cpu",
  kernelFunc: i
  };
- a(k);
+ o(f);
  const g = {
  kernelName: "RMSNorm",
  backendName: "tensorflow",
  kernelFunc: i
  };
- a(g);
+ o(g);
  function N(t) {
  const { dy: e, x: n, gamma: s } = t.inputs;
  return d(() => {
- const r = n.shape[n.shape.length - 1], m = n.square().mean(-1, !0), o = m.add(1e-8).rsqrt(), u = n.mul(o), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
- return [c.mul(o).sub(n.mul(f).mul(o).div(m.add(1e-8))), l];
+ const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), k = c.mul(n).sum(-1, !0).div(r);
+ return [c.mul(m).sub(n.mul(k).mul(m).div(a.add(1e-8))), l];
  });
  }
  const S = {
@@ -31,10 +30,10 @@ const S = {
  backendName: "cpu",
  kernelFunc: N
  };
- a(S);
+ o(S);
  const p = {
  kernelName: "RMSNormGrad",
  backendName: "tensorflow",
  kernelFunc: N
  };
- a(p);
+ o(p);
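
The forward kernel is plain RMSNorm: normalize by the root-mean-square over the last axis (with ε = 1e-8 inside the rsqrt) and scale by gamma. On a single feature vector (a sketch; names are hypothetical):

    // RMSNorm: y[i] = x[i] / sqrt(mean(x^2) + eps) * gamma[i], mirroring
    // r.square().mean(-1, !0).add(1e-8).rsqrt() in the hunk above.
    function rmsNorm(x: number[], gamma: number[], eps = 1e-8): number[] {
      const meanSq = x.reduce((acc, v) => acc + v * v, 0) / x.length;
      const inv = 1 / Math.sqrt(meanSq + eps);
      return x.map((v, i) => v * inv * gamma[i]);
    }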
package/dist/ops/cpu/qkv.js CHANGED
@@ -1,7 +1,6 @@
- import "../../index-D6Q1lPZO.js";
- import { r as q } from "../../tensor_util-DfwaWayG.js";
- import { r as o } from "../../reshape-CaPQzFvz.js";
- import { s as x } from "../../split-DBck65sX.js";
+ import { p as q } from "../../index-DOvlwCh-.js";
+ import { r as o } from "../../reshape-ByE68wS9.js";
+ import { s as x } from "../../split-CVLc0w--.js";
  function v(p) {
  const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
  if (C)
@@ -10,18 +9,18 @@ function v(p) {
  a.dispose();
  const d = o(i, [s, e, 3 * t]);
  i.dispose();
- const [k, m, l] = x(d, 3, -1);
+ const [k, l, m] = x(d, 3, -1);
  d.dispose();
  const r = t / n, f = o(k, [s, e, n, r]);
  k.dispose();
  const w = f.transpose([0, 2, 1, 3]);
  f.dispose();
- const h = o(m, [s, e, n, r]);
- m.dispose();
+ const h = o(l, [s, e, n, r]);
+ l.dispose();
  const N = h.transpose([0, 2, 1, 3]);
  h.dispose();
- const u = o(l, [s, e, n, r]);
- l.dispose();
+ const u = o(m, [s, e, n, r]);
+ m.dispose();
  const T = u.transpose([0, 2, 1, 3]);
  return u.dispose(), [w, N, T];
  }
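
The shape bookkeeping here is the usual fused-QKV split: the [B, T, 3C] projection is cut into three [B, T, C] tensors, each reshaped per head and transposed to [B, heads, T, C/heads]. As a small helper (hypothetical names; in the minified code s = B, e = T, t = C, n = heads):

    // Shape walk-through of the QKV split in the hunk above.
    function qkvShapes(B: number, T: number, C: number, heads: number) {
      const headDim = C / heads;            // r = t / n
      return {
        projected: [B, T, 3 * C],           // d = reshape(i, [s, e, 3 * t])
        perTensor: [B, T, C],               // after split(d, 3, -1)
        headsView: [B, T, heads, headDim],  // reshape(k, [s, e, n, r])
        output: [B, heads, T, headDim],     // transpose([0, 2, 1, 3])
      };
    }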
package/dist/ops/cpu/rope.js CHANGED
@@ -1,9 +1,8 @@
- import "../../index-D6Q1lPZO.js";
- import { r as I } from "../../tensor_util-DfwaWayG.js";
- import { r as y } from "../../range-ZZZD60Fx.js";
- import { g as F } from "../../gather-CH9sdacz.js";
- import { s as E } from "../../stack-ChnHwRpX.js";
- import { c as T } from "../../concat-DmBLPVGC.js";
+ import { p as I } from "../../index-DOvlwCh-.js";
+ import { r as y } from "../../range-BklejeeW.js";
+ import { g as F } from "../../gather-D3JcZUaI.js";
+ import { s as E } from "../../stack-Cf4n9h0N.js";
+ import { c as T } from "../../concat-BV8bt5H-.js";
  function U(c, r, p, e, n) {
  const t = e.shape[3], s = p;
  if (s > t) return e;
package/dist/ops/cpu/scatterSub.js CHANGED
@@ -1,25 +1,23 @@
- import { A as f, B as c, E as g, c as l, m as N } from "../../index-D6Q1lPZO.js";
- import { j as b, r as S } from "../../tensor_util-DfwaWayG.js";
- import { d as h } from "../../tensor-CzmOBsdf.js";
- import { v as D } from "../../scatter_nd_util-C7zXRT_h.js";
- import { r as k } from "../../range-ZZZD60Fx.js";
- import { s as v } from "../../stack-ChnHwRpX.js";
- import { o as E } from "../../ones-jU9jlQvM.js";
- function I(r, e, s) {
- h(s);
- const n = c(r, "indices", "scatterND", "int32"), t = c(e, "updates", "scatterND");
+ import { A as f, C as g, B as r, E as l, ai as N, p as b, c as S, m as h } from "../../index-DOvlwCh-.js";
+ import { v as D } from "../../scatter_nd_util-lvSiX8q4.js";
+ import { r as k } from "../../range-BklejeeW.js";
+ import { s as v } from "../../stack-Cf4n9h0N.js";
+ import { o as E } from "../../ones-D2rT0xk2.js";
+ function I(a, e, s) {
+ g(s);
+ const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
  D(t, n, s);
- const o = { indices: n, updates: t }, a = { shape: s };
- return g.runKernel(b, o, a);
+ const c = { indices: n, updates: t }, o = { shape: s };
+ return l.runKernel(N, c, o);
  }
- const K = /* @__PURE__ */ f({ scatterND_: I });
- function L(r) {
- const { logits: e, labels: s, dy: n } = r.inputs, t = s.shape[0], o = e.shape[1], a = k(0, t, 1, "int32"), i = v([a, s], 1), d = E([t]), p = K(i, d, [t, o]), u = l(e, p), m = n.reshape([t, 1]);
- return N(u, m);
+ const C = /* @__PURE__ */ f({ scatterND_: I });
+ function K(a) {
+ const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = C(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
+ return h(p, m);
  }
- const T = {
+ const L = {
  kernelName: "EfficientScatterSub",
  backendName: "cpu",
- kernelFunc: L
+ kernelFunc: K
  };
- S(T);
+ b(L);
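
Decoded, the kernel scatters a 1 into each row at its label's column, subtracts that one-hot matrix from logits, and scales each row by dy: the familiar (probs - onehot) · dy backward of sparse cross-entropy, assuming the logits input already holds softmax probabilities. On plain arrays (a sketch):

    // EfficientScatterSub on plain arrays:
    // out[i][j] = (logits[i][j] - (j === labels[i] ? 1 : 0)) * dy[i].
    function scatterSubGrad(logits: number[][], labels: number[], dy: number[]): number[][] {
      return logits.map((row, i) =>
        row.map((v, j) => (v - (j === labels[i] ? 1 : 0)) * dy[i])
      );
    }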
package/dist/ops/dot16.js CHANGED
@@ -1,8 +1,8 @@
- import { b as d } from "../matMul16-fEAJ4smh.js";
+ import { b as d } from "../matMul16-BWRSOCWB.js";
  import { transpose16 as w } from "./transpose16.js";
  import { reshape16 as n } from "./reshape16.js";
  import { isPackedTensor as p } from "../utilities/packed.js";
- import { d as x } from "../tfjs_backend-CNkSTL0c.js";
+ import { d as x } from "../tfjs_backend-y1cvNhLA.js";
  function E(e, s, h = !1, c = !1) {
  if (!p(e) && !p(s))
  return x(e, s);
package/dist/ops/gatherSub.js CHANGED
@@ -1,4 +1,4 @@
- import { e as n } from "../index-D6Q1lPZO.js";
+ import { e as n } from "../index-DOvlwCh-.js";
  import "./cpu/gatherSub.js";
  import "./webgl/gatherSub.js";
  function f(r, e, t) {
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
- import "../index-D6Q1lPZO.js";
+ import "../index-DOvlwCh-.js";
  import "./cpu/gelu.js";
  import "./webgl/gelu.js";
- import { d as e, g as i } from "../gelu-Bmhopi0J.js";
+ import { d as e, g as i } from "../gelu-CjNPL4OH.js";
  export {
  e as dGelu,
  i as gelu
package/dist/ops/grads/add16.js CHANGED
@@ -1,27 +1,26 @@
- import { j as u, q as d } from "../../index-D6Q1lPZO.js";
- import { sum16 as p } from "../sum16.js";
- import { reshape16 as c } from "../reshape16.js";
- import { a as h } from "../../tensor_util-DfwaWayG.js";
- const m = {
+ import { u as i, a3 as h, a4 as d } from "../../index-DOvlwCh-.js";
+ import { sum16 as c } from "../sum16.js";
+ import { reshape16 as p } from "../reshape16.js";
+ const A = {
  kernelName: "Add16",
  inputsToSave: ["a", "b"],
- gradFunc: (s, i) => {
- const [t, a] = i, n = u(t.shape, a.shape);
+ gradFunc: (s, u) => {
+ const [t, a] = u, n = h(t.shape, a.shape);
  if (Array.isArray(s))
  throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
  return { a: () => {
  let e = s;
  const r = d(t.shape, n);
- r.length > 0 && (e = p(e, r));
- const o = c(e, t.shape);
+ r.length > 0 && (e = c(e, r));
+ const o = p(e, t.shape);
  return e.dispose(), o;
  }, b: () => {
  let e = s;
  const r = d(a.shape, n);
- r.length > 0 && (e = p(e, r));
- const o = c(e, a.shape);
+ r.length > 0 && (e = c(e, r));
+ const o = p(e, a.shape);
  return e.dispose(), o;
  } };
  }
  };
- h(m);
+ i(A);
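
The gradFunc handles broadcasting: for each input, dy is summed over the axes that were broadcast and then reshaped back to that input's shape. A sketch of the axis computation (a hypothetical helper; TF.js ships its own equivalent internally):

    // Axes of outShape along which inShape was broadcast; dy must be summed over
    // these before it is reshaped back to inShape, as in the Add16 gradFunc above.
    function reductionAxes(inShape: number[], outShape: number[]): number[] {
      const axes: number[] = [];
      const offset = outShape.length - inShape.length;
      for (let axis = 0; axis < outShape.length; axis++) {
        const inDim = axis < offset ? 1 : inShape[axis - offset];
        if (inDim === 1 && outShape[axis] > 1) axes.push(axis);
      }
      return axes;
    }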
package/dist/ops/grads/attentionMask.js CHANGED
@@ -1,7 +1,6 @@
- import "../../index-D6Q1lPZO.js";
- import { m as o } from "../../matMul16-fEAJ4smh.js";
- import { transpose16 as m } from "../transpose16.js";
- import { a as c } from "../../tensor_util-DfwaWayG.js";
+ import { u as m } from "../../index-DOvlwCh-.js";
+ import { m as o } from "../../matMul16-BWRSOCWB.js";
+ import { transpose16 as c } from "../transpose16.js";
  const l = {
  kernelName: "AttentionMask",
  inputsToSave: ["q", "k"],
@@ -13,10 +12,10 @@ const l = {
  return {
  q: () => o(r, i, e),
  k: () => {
- const t = o(a, r, e, !0, !1), u = m(t, [0, 1, 3, 2]);
+ const t = o(a, r, e, !0, !1), u = c(t, [0, 1, 3, 2]);
  return t.dispose(), u;
  }
  };
  }
  };
- c(l);
+ m(l);
package/dist/ops/grads/gelu.js CHANGED
@@ -1,6 +1,5 @@
- import "../../index-D6Q1lPZO.js";
- import { a as m } from "../../gelu-Bmhopi0J.js";
- import "../../tensor_util-DfwaWayG.js";
+ import "../../index-DOvlwCh-.js";
+ import { a as e } from "../../gelu-CjNPL4OH.js";
  export {
- m as geluGradConfig
+ e as geluGradConfig
  };
package/dist/ops/grads/matMul16.js CHANGED
@@ -1,9 +1,8 @@
- import "../../index-D6Q1lPZO.js";
- import { a as f } from "../../matMul16-fEAJ4smh.js";
- import "../../gelu-Bmhopi0J.js";
+ import "../../index-DOvlwCh-.js";
+ import { a } from "../../matMul16-BWRSOCWB.js";
+ import "../../gelu-CjNPL4OH.js";
  import "../transpose16.js";
  import "../reshape16.js";
- import "../../tensor_util-DfwaWayG.js";
  export {
- f as matMul16GradConfig
+ a as matMul16GradConfig
  };
package/dist/ops/grads/matMulGelu.js CHANGED
@@ -1,18 +1,17 @@
- import { e as l } from "../../index-D6Q1lPZO.js";
- import { a as o } from "../../tensor_util-DfwaWayG.js";
- function i(e, r, n) {
- return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
+ import { u as a, e as o } from "../../index-DOvlwCh-.js";
+ function s(e, n, r) {
+ return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
  }
- const s = {
+ const d = {
  kernelName: "MatMulGelu",
  inputsToSave: ["x", "kernel"],
  outputsToSave: [],
- gradFunc: (e, r) => {
- const [n, t] = r, [u, a] = i(e, n, t);
+ gradFunc: (e, n) => {
+ const [r, u] = n, [t, l] = s(e, r, u);
  return {
- x: () => u,
- kernel: () => a
+ x: () => t,
+ kernel: () => l
  };
  }
  };
- o(s);
+ a(d);
package/dist/ops/grads/normRMS.js CHANGED
@@ -1,21 +1,20 @@
- import { e as t } from "../../index-D6Q1lPZO.js";
- import { a as g } from "../../tensor_util-DfwaWayG.js";
- function i(r, a, m) {
- return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
+ import { u as t, e as u } from "../../index-DOvlwCh-.js";
+ function g(r, a, n) {
+ return u().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
  }
  const s = {
  kernelName: "RMSNorm",
  inputsToSave: ["x", "gamma"],
  outputsToSave: [],
  gradFunc: (r, a) => {
- const [m, n] = a, [o, e] = i(r, m, n);
+ const [n, e] = a, [m, o] = g(r, n, e);
  return {
- x: () => o,
- gamma: () => e
+ x: () => m,
+ gamma: () => o
  };
  }
  };
- g(s);
+ t(s);
  export {
  s as normRMSGradConfig
  };
package/dist/ops/grads/pack16.js CHANGED
@@ -1,7 +1,6 @@
- import "../../index-D6Q1lPZO.js";
- import { b as t } from "../../pack16-CmVZs6af.js";
- import "../../slice-DvovR5wq.js";
- import "../../tensor_util-DfwaWayG.js";
+ import "../../index-DOvlwCh-.js";
+ import { b as i } from "../../pack16-nQ6JaLo-.js";
+ import "../../slice-BfEGSH82.js";
  export {
- t as packGradConfig
+ i as packGradConfig
  };