@genai-fi/nanogpt 0.10.3 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. package/dist/Generator.d.ts +10 -5
  2. package/dist/Generator.js +1789 -1765
  3. package/dist/{RealDiv-KAPDe8zB.js → RealDiv-C8neBwFi.js} +15 -15
  4. package/dist/{Reshape-BYkmUnAv.js → Reshape-Bd4V_4X7.js} +1 -1
  5. package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ck29jQSY.js} +5 -5
  6. package/dist/TeachableLLM.d.ts +5 -3
  7. package/dist/TeachableLLM.js +14 -14
  8. package/dist/Trainer.d.ts +3 -1
  9. package/dist/Trainer.js +11 -8
  10. package/dist/{axis_util-BaG7mf5A.js → axis_util-DGqbT-FX.js} +3 -3
  11. package/dist/backend.js +2 -2
  12. package/dist/{backend_util-RCe-rHaj.js → backend_util-DC3rBo_H.js} +18 -18
  13. package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-mbhNnlx9.js} +3 -3
  14. package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
  15. package/dist/checks/appendCache.js +2 -2
  16. package/dist/checks/attentionMask.js +3 -3
  17. package/dist/checks/gelu.js +2 -2
  18. package/dist/checks/matMulGelu.js +2 -2
  19. package/dist/checks/normRMS.js +4 -4
  20. package/dist/checks/normRMSGrad.js +3 -3
  21. package/dist/checks/packUnpack.js +2 -2
  22. package/dist/checks/qkv.js +4 -4
  23. package/dist/checks/rope.js +2 -2
  24. package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-fg2aKzUy.js} +5 -5
  25. package/dist/complex-Cyg-eQeZ.js +11 -0
  26. package/dist/concat-CSm2rMwe.js +17 -0
  27. package/dist/{concat_util-DpW8mL_l.js → concat_util-D0je5Ppu.js} +1 -1
  28. package/dist/{dataset-BcwmTGYc.js → dataset-CVIJu7Xa.js} +7 -7
  29. package/dist/{dropout-BcvN9JYi.js → dropout-DLhSMNTZ.js} +9 -9
  30. package/dist/expand_dims-ChkuOp6I.js +11 -0
  31. package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-1KWPiStI.js} +1 -1
  32. package/dist/{floor-D5QdR_le.js → floor-BRMPgeIs.js} +1 -1
  33. package/dist/{gather-D3JcZUaI.js → gather-BSULDalH.js} +1 -1
  34. package/dist/{gelu-CjNPL4OH.js → gelu-BK1k-n1i.js} +1 -1
  35. package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-BJSTk_mW.js} +25 -25
  36. package/dist/{index-BwexR4lA.js → index-BBVLAXZD.js} +89 -89
  37. package/dist/{index-DOvlwCh-.js → index-Duu1Lvvv.js} +53 -53
  38. package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-BtYrPoJu.js} +6 -6
  39. package/dist/layers/BaseLayer.js +2 -2
  40. package/dist/layers/CausalSelfAttention.js +6 -6
  41. package/dist/layers/MLP.js +4 -4
  42. package/dist/layers/PositionEmbedding.js +5 -5
  43. package/dist/layers/RMSNorm.js +3 -3
  44. package/dist/layers/RoPECache.js +4 -4
  45. package/dist/layers/TiedEmbedding.js +6 -6
  46. package/dist/layers/TransformerBlock.js +1 -1
  47. package/dist/loader/loadTransformers.js +1 -1
  48. package/dist/loader/oldZipLoad.js +9 -9
  49. package/dist/log_sum_exp-CVqLsVLl.js +39 -0
  50. package/dist/main.d.ts +10 -1
  51. package/dist/main.js +68 -58
  52. package/dist/{matMul16-BWRSOCWB.js → matMul16-xswmhSuF.js} +3 -3
  53. package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-BpvgnYG8.js} +14 -14
  54. package/dist/mat_mul-Bn2BDpT4.js +11 -0
  55. package/dist/{mod-AnXEvvpo.js → mod-B4AUd1Np.js} +1 -1
  56. package/dist/models/NanoGPTV1.js +2 -2
  57. package/dist/models/model.js +9 -9
  58. package/dist/{ones-D2rT0xk2.js → ones-CBI1AQjb.js} +3 -3
  59. package/dist/ops/adamAdjust.js +1 -1
  60. package/dist/ops/adamMoments.js +1 -1
  61. package/dist/ops/add16.js +1 -1
  62. package/dist/ops/appendCache.js +3 -3
  63. package/dist/ops/attentionMask.js +1 -1
  64. package/dist/ops/concat16.js +2 -2
  65. package/dist/ops/cpu/adamAdjust.js +9 -9
  66. package/dist/ops/cpu/adamMoments.js +5 -5
  67. package/dist/ops/cpu/appendCache.js +6 -6
  68. package/dist/ops/cpu/attentionMask.js +10 -10
  69. package/dist/ops/cpu/fusedSoftmax.js +5 -5
  70. package/dist/ops/cpu/gatherSub.js +9 -9
  71. package/dist/ops/cpu/gelu.js +5 -5
  72. package/dist/ops/cpu/matMul16.js +2 -2
  73. package/dist/ops/cpu/matMulGelu.js +3 -3
  74. package/dist/ops/cpu/matMulMul.js +5 -5
  75. package/dist/ops/cpu/mulDropout.js +1 -1
  76. package/dist/ops/cpu/normRMS.js +7 -7
  77. package/dist/ops/cpu/qkv.js +3 -3
  78. package/dist/ops/cpu/rope.js +5 -5
  79. package/dist/ops/cpu/scatterSub.js +11 -11
  80. package/dist/ops/dot16.js +2 -2
  81. package/dist/ops/gatherSub.js +1 -1
  82. package/dist/ops/gelu.js +2 -2
  83. package/dist/ops/grads/add16.js +4 -4
  84. package/dist/ops/grads/attentionMask.js +2 -2
  85. package/dist/ops/grads/gelu.js +2 -2
  86. package/dist/ops/grads/matMul16.js +3 -3
  87. package/dist/ops/grads/matMulGelu.js +6 -6
  88. package/dist/ops/grads/normRMS.js +4 -4
  89. package/dist/ops/grads/pack16.js +3 -3
  90. package/dist/ops/grads/qkv.js +10 -10
  91. package/dist/ops/grads/rope.js +2 -2
  92. package/dist/ops/grads/softmax16.js +1 -1
  93. package/dist/ops/grads/unpack16.js +2 -2
  94. package/dist/ops/matMul16.js +3 -3
  95. package/dist/ops/matMulGelu.js +2 -2
  96. package/dist/ops/matMulMul.js +1 -1
  97. package/dist/ops/mul16.js +1 -1
  98. package/dist/ops/mulDrop.js +1 -1
  99. package/dist/ops/normRMS.js +1 -1
  100. package/dist/ops/pack16.js +2 -2
  101. package/dist/ops/qkv.js +1 -1
  102. package/dist/ops/reshape16.js +2 -2
  103. package/dist/ops/rope.js +2 -2
  104. package/dist/ops/scatterSub.js +1 -1
  105. package/dist/ops/slice16.js +2 -2
  106. package/dist/ops/softmax16.js +1 -1
  107. package/dist/ops/sub16.js +1 -1
  108. package/dist/ops/sum16.js +2 -2
  109. package/dist/ops/transpose16.js +6 -6
  110. package/dist/ops/unpack16.js +2 -2
  111. package/dist/ops/webgl/adamAdjust.js +2 -2
  112. package/dist/ops/webgl/adamMoments.js +1 -1
  113. package/dist/ops/webgl/appendCache.js +1 -1
  114. package/dist/ops/webgl/attentionMask.js +1 -1
  115. package/dist/ops/webgl/fusedSoftmax.js +4 -4
  116. package/dist/ops/webgl/gatherSub.js +1 -1
  117. package/dist/ops/webgl/gelu.js +2 -2
  118. package/dist/ops/webgl/log.js +3 -3
  119. package/dist/ops/webgl/matMul16.js +8 -8
  120. package/dist/ops/webgl/matMulGelu.js +4 -4
  121. package/dist/ops/webgl/matMulMul.js +7 -7
  122. package/dist/ops/webgl/mulDropout.js +1 -1
  123. package/dist/ops/webgl/normRMS.js +7 -7
  124. package/dist/ops/webgl/qkv.js +1 -1
  125. package/dist/ops/webgl/rope.js +1 -1
  126. package/dist/ops/webgl/scatterSub.js +1 -1
  127. package/dist/ops/webgpu/adamAdjust.js +3 -3
  128. package/dist/ops/webgpu/adamMoments.js +5 -5
  129. package/dist/ops/webgpu/add16.js +1 -1
  130. package/dist/ops/webgpu/appendCache.js +3 -3
  131. package/dist/ops/webgpu/attentionMask.js +2 -2
  132. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  133. package/dist/ops/webgpu/concat16.js +5 -5
  134. package/dist/ops/webgpu/gatherSub.js +5 -5
  135. package/dist/ops/webgpu/gelu.js +3 -3
  136. package/dist/ops/webgpu/matMul16.js +19 -19
  137. package/dist/ops/webgpu/matMul16_program.js +2 -2
  138. package/dist/ops/webgpu/mul16.js +4 -4
  139. package/dist/ops/webgpu/normRMS.js +6 -6
  140. package/dist/ops/webgpu/normRMSGrad.js +4 -4
  141. package/dist/ops/webgpu/pack16.js +3 -3
  142. package/dist/ops/webgpu/pack16_program.js +2 -2
  143. package/dist/ops/webgpu/qkv.js +8 -8
  144. package/dist/ops/webgpu/rope.js +3 -3
  145. package/dist/ops/webgpu/scatterSub.js +3 -3
  146. package/dist/ops/webgpu/slice16.js +4 -4
  147. package/dist/ops/webgpu/softmax16.js +4 -4
  148. package/dist/ops/webgpu/softmax16_program.js +2 -2
  149. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  150. package/dist/ops/webgpu/softmax16grad.js +1 -1
  151. package/dist/ops/webgpu/sub16.js +4 -4
  152. package/dist/ops/webgpu/sum16.js +5 -5
  153. package/dist/ops/webgpu/transpose16.js +2 -2
  154. package/dist/ops/webgpu/transpose16_program.js +2 -2
  155. package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
  156. package/dist/ops/webgpu/unpack16.js +5 -5
  157. package/dist/ops/webgpu/utils/binary_op.js +3 -3
  158. package/dist/ops/webgpu/utils/reductions.js +4 -4
  159. package/dist/{ops-B5yanEdW.js → ops-C2_OXuZ4.js} +69 -69
  160. package/dist/{pack16-nQ6JaLo-.js → pack16-atD0eYRm.js} +9 -9
  161. package/dist/patches/webgpu_backend.js +6 -6
  162. package/dist/patches/webgpu_base.js +1 -1
  163. package/dist/patches/webgpu_program.js +8 -8
  164. package/dist/{random_width-or-CEftb.js → random_width-BN4wGJaW.js} +33 -33
  165. package/dist/range-DKmP1-OQ.js +10 -0
  166. package/dist/relu-BsXmGzzu.js +9 -0
  167. package/dist/{reshape-ByE68wS9.js → reshape-BI0yzp1T.js} +1 -1
  168. package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-BA_BX-ub.js} +26 -26
  169. package/dist/{rope-Ir4mTyD1.js → rope-DJ7Y7c-u.js} +1 -1
  170. package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-k9MUVUkn.js} +1 -1
  171. package/dist/{selu_util-kbhpTdYD.js → selu_util-DyW0X1WG.js} +5 -5
  172. package/dist/{shared-DT1TkE6w.js → shared-Q3BS6T03.js} +1 -1
  173. package/dist/{shared-dntlHIDQ.js → shared-nnSWpC3u.js} +86 -86
  174. package/dist/{slice-BfEGSH82.js → slice-wBNvzVyz.js} +1 -1
  175. package/dist/{slice_util-uTKwiEpW.js → slice_util-zN8KFC5I.js} +1 -1
  176. package/dist/{softmax-CA5jFsLR.js → softmax-DfuYyjMh.js} +1 -1
  177. package/dist/split-BYrLboMq.js +9 -0
  178. package/dist/squeeze-Bk8Brcct.js +10 -0
  179. package/dist/{stack-Cf4n9h0N.js → stack-CDWShFHF.js} +1 -1
  180. package/dist/{step-CINUs5QB.js → step-BS5JXRR6.js} +23 -23
  181. package/dist/{sum-DWAtNGez.js → sum-BPUfDB2X.js} +3 -3
  182. package/dist/tensor-CEt9Nm2s.js +8 -0
  183. package/dist/tensor1d-Cc_KCIDg.js +11 -0
  184. package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN97fF71.js} +3 -3
  185. package/dist/{tensor4d-BARPdTaS.js → tensor4d-vuDDgdUI.js} +1 -1
  186. package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-806hyYve.js} +49 -49
  187. package/dist/{tile-mbfagpsB.js → tile-OWUvpIVt.js} +3 -3
  188. package/dist/tokeniser/BaseTokeniser.d.ts +25 -0
  189. package/dist/tokeniser/BaseTokeniser.js +94 -0
  190. package/dist/tokeniser/CharTokeniser.d.ts +10 -9
  191. package/dist/tokeniser/CharTokeniser.js +44 -30
  192. package/dist/tokeniser/bpe.d.ts +10 -9
  193. package/dist/tokeniser/bpe.js +67 -52
  194. package/dist/tokeniser/type.d.ts +14 -5
  195. package/dist/training/Adam.js +2 -2
  196. package/dist/training/AdamExt.js +1 -1
  197. package/dist/training/DatasetBuilder.d.ts +3 -3
  198. package/dist/training/DatasetBuilder.js +34 -38
  199. package/dist/training/FullTrainer.js +1 -1
  200. package/dist/training/Trainer.d.ts +4 -3
  201. package/dist/training/Trainer.js +22 -25
  202. package/dist/training/sparseCrossEntropy.js +3 -3
  203. package/dist/training/tasks/ConversationTask.d.ts +11 -0
  204. package/dist/training/tasks/ConversationTask.js +26 -0
  205. package/dist/training/tasks/PretrainingTask.d.ts +11 -0
  206. package/dist/training/tasks/PretrainingTask.js +34 -0
  207. package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
  208. package/dist/training/tasks/StartSentenceTask.js +42 -0
  209. package/dist/training/tasks/Task.d.ts +8 -0
  210. package/dist/training/tasks/Task.js +41 -0
  211. package/dist/{transpose-ClWiBS_b.js → transpose-BUkQCJp9.js} +6 -6
  212. package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-BljxHhCY.js} +5 -5
  213. package/dist/utilities/dummy.js +3 -3
  214. package/dist/utilities/multinomialCPU.js +2 -2
  215. package/dist/utilities/packed.js +1 -1
  216. package/dist/utilities/performance.js +1 -1
  217. package/dist/utilities/profile.js +1 -1
  218. package/dist/utilities/safetensors.js +2 -2
  219. package/dist/utilities/sentences.d.ts +1 -1
  220. package/dist/utilities/sentences.js +11 -11
  221. package/dist/utilities/weights.js +2 -2
  222. package/dist/{variable-WawDEaAb.js → variable-DPt_Iuog.js} +1 -1
  223. package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-BpWRlghH.js} +3 -3
  224. package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-DMiKzzQM.js} +7 -7
  225. package/dist/{zeros-KnWaWf-X.js → zeros-5YROwwUH.js} +2 -2
  226. package/dist/{zeros_like-DvE73F4e.js → zeros_like-De4n1C3m.js} +71 -71
  227. package/package.json +1 -1
  228. package/dist/complex-DjxcVmoX.js +0 -11
  229. package/dist/concat-BV8bt5H-.js +0 -17
  230. package/dist/expand_dims-DT4tEPwA.js +0 -11
  231. package/dist/log_sum_exp-ngO0-4pK.js +0 -39
  232. package/dist/mat_mul-SjpJRLyL.js +0 -11
  233. package/dist/range-BklejeeW.js +0 -10
  234. package/dist/relu-CP0ZcxWO.js +0 -9
  235. package/dist/split-CVLc0w--.js +0 -9
  236. package/dist/squeeze-C7Z2srUo.js +0 -10
  237. package/dist/tensor-DJoc7gJU.js +0 -8
  238. package/dist/tensor1d-D11P_7Dp.js +0 -11
@@ -1,11 +1,11 @@
-import { p as C, t as R, e as I, h as G, a3 as L, l as F, ak as U } from "./index-DOvlwCh-.js";
-import { r as M } from "./Reshape-Zt6eb7yh.js";
-import { u as H } from "./gpgpu_math-DAOmgtXR.js";
-import { m as B } from "./mat_mul-SjpJRLyL.js";
+import { f as C, t as R, e as I, U as G, _ as L, x as U, ak as F } from "./index-Duu1Lvvv.js";
+import { r as M } from "./Reshape-Ck29jQSY.js";
+import { u as H } from "./gpgpu_math-BJSTk_mW.js";
+import { m as B } from "./mat_mul-Bn2BDpT4.js";
 class W {
   constructor(e, s, a, n = !1, o = !1, r = !1, i = null, u = !1, l = !1) {
     this.variableNames = ["matrixA", "matrixB"], this.packedInputs = !0, this.packedOutput = !0, this.outputShape = a, this.enableShapeUniforms = H(this.outputShape.length);
-    const p = n ? e[1] : e[2], h = Math.ceil(p / 2), d = n ? "i * 2, rc.y" : "rc.y, i * 2", x = o ? "rc.z, i * 2" : "i * 2, rc.z", b = n ? ["a.xxyy", "a.zzww"] : ["a.xxzz", "a.yyww"], m = o ? ["b.xzxz", "b.ywyw"] : ["b.xyxy", "b.zwzw"];
+    const p = n ? e[1] : e[2], h = Math.ceil(p / 2), d = n ? "i * 2, rc.y" : "rc.y, i * 2", b = o ? "rc.z, i * 2" : "i * 2, rc.z", x = n ? ["a.xxyy", "a.zzww"] : ["a.xxzz", "a.yyww"], m = o ? ["b.xzxz", "b.ywyw"] : ["b.xyxy", "b.zwzw"];
     let c = "", g = "";
     i && (u ? c = `vec4 activation(vec4 a) {
       vec4 b = getPreluActivationWeightsAtOutCoords();
@@ -30,12 +30,12 @@ class W {
   int batchB = ${v};
   for (int i = 0; i < ${h}; i++) {
     vec4 a = getMatrixA(batchA, ${d});
-    vec4 b = getMatrixB(batchB, ${x});
+    vec4 b = getMatrixB(batchB, ${b});

     // These swizzled products need to be separately added.
     // See: https://github.com/tensorflow/tfjs/issues/1735
-    result += (${b[0]} * ${m[0]});
-    result += (${b[1]} * ${m[1]});
+    result += (${x[0]} * ${m[0]});
+    result += (${x[1]} * ${m[1]});
   }
   return result;
 }
@@ -90,24 +90,24 @@ function O({
   activationSnippet: o,
   multiplier: r
 }) {
-  const i = t.shape.length, u = e.shape.length, l = s ? t.shape[i - 2] : t.shape[i - 1], p = a ? e.shape[u - 1] : e.shape[u - 2], h = s ? t.shape[i - 1] : t.shape[i - 2], d = a ? e.shape[u - 2] : e.shape[u - 1], x = t.shape.slice(0, -2), b = e.shape.slice(0, -2), m = G(x), c = G(b), $ = L(t.shape.slice(0, -2), e.shape.slice(0, -2)).concat([h, d]);
-  F(
+  const i = t.shape.length, u = e.shape.length, l = s ? t.shape[i - 2] : t.shape[i - 1], p = a ? e.shape[u - 1] : e.shape[u - 2], h = s ? t.shape[i - 1] : t.shape[i - 2], d = a ? e.shape[u - 2] : e.shape[u - 1], b = t.shape.slice(0, -2), x = e.shape.slice(0, -2), m = G(b), c = G(x), $ = L(t.shape.slice(0, -2), e.shape.slice(0, -2)).concat([h, d]);
+  U(
     l === p,
     () => `Error in matMul: inner shapes (${l}) and (${p}) of Tensors with shapes ${t.shape} and ${e.shape} and transposeA=${s} and transposeB=${a} must match.`
   );
-  const f = s ? [m, l, h] : [m, h, l], v = a ? [c, d, p] : [c, p, d], A = M({ inputs: { x: t }, backend: n, attrs: { shape: f } }), y = M({ inputs: { x: e }, backend: n, attrs: { shape: v } }), k = [A, y], E = Math.max(m, c), N = o, T = U(t.dtype, e.dtype), _ = new W(
+  const f = s ? [m, l, h] : [m, h, l], v = a ? [c, d, p] : [c, p, d], A = M({ inputs: { x: t }, backend: n, attrs: { shape: f } }), y = M({ inputs: { x: e }, backend: n, attrs: { shape: v } }), k = [A, y], _ = Math.max(m, c), E = o, N = F(t.dtype, e.dtype), T = new W(
     f,
     v,
-    [E, h, d],
+    [_, h, d],
     s,
     a,
     !1,
-    N,
+    E,
     !!r,
     !1
   ), D = [A, y];
   r && D.push(r);
-  const z = n.runWebGLProgram(_, D, T), K = M({ inputs: { x: z }, backend: n, attrs: { shape: $ } });
+  const z = n.runWebGLProgram(T, D, N), K = M({ inputs: { x: z }, backend: n, attrs: { shape: $ } });
   k.push(z);
   for (const P of k)
     n.disposeIntermediateTensorInfo(P);
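The swizzled products in the shader above implement the packed 2x2-tile matmul trick the comment's tfjs issue refers to: each vec4 holds a 2x2 tile, and two swizzled component-wise products accumulate one k-step of the tile product. A scalar TypeScript analogue of the transpose-free case (a.xxzz * b.xyxy + a.yyww * b.zwzw), written here only as an illustration:

// Each Vec4 is a 2x2 tile: [x, y, z, w] = [(0,0), (0,1), (1,0), (1,1)].
type Vec4 = [number, number, number, number];

// acc += A-tile * B-tile for one step of the inner loop.
function accumulateTile(acc: Vec4, a: Vec4, b: Vec4): Vec4 {
  const [ax, ay, az, aw] = a;
  const [bx, by, bz, bw] = b;
  return [
    acc[0] + ax * bx + ay * bz, // row0 · col0  (a.x*b.x + a.y*b.z)
    acc[1] + ax * by + ay * bw, // row0 · col1
    acc[2] + az * bx + aw * bz, // row1 · col0
    acc[3] + az * by + aw * bw, // row1 · col1
  ];
}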
@@ -0,0 +1,11 @@
+import { o as m, q as s, B as c, E as M, D as p } from "./index-Duu1Lvvv.js";
+function f(e, o, n = !1, l = !1) {
+  let a = s(e, "a", "matMul"), t = s(o, "b", "matMul");
+  [a, t] = c(a, t);
+  const r = { a, b: t }, u = { transposeA: n, transposeB: l };
+  return M.runKernel(p, r, u);
+}
+const i = /* @__PURE__ */ m({ matMul_: f });
+export {
+  i as m
+};
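This new chunk is recognizably the standard TensorFlow.js matMul op wrapper (convert inputs, match dtypes, run the BatchMatMul kernel). A hedged sketch of the same behaviour using only public tfjs API; the mapping of the minified names to tfjs-core internals is an assumption:

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch of the chunk above: M.runKernel(p, r, u) appears to be
// ENGINE.runKernel(BatchMatMul, { a, b }, { transposeA, transposeB }).
function matMul(a: tf.Tensor, b: tf.Tensor, transposeA = false, transposeB = false): tf.Tensor {
  return tf.engine().runKernel(
    'BatchMatMul',
    { a, b },
    { transposeA, transposeB }
  ) as tf.Tensor;
}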
@@ -1,4 +1,4 @@
-import { A as r, B as s, L as m, E as c, N as d } from "./index-DOvlwCh-.js";
+import { o as r, q as s, B as m, E as c, M as d } from "./index-Duu1Lvvv.js";
 function p(t, e) {
   let o = s(t, "a", "mod"), a = s(e, "b", "mod");
   [o, a] = m(o, a);
@@ -3,11 +3,11 @@ import b from "../layers/TransformerBlock.js";
 import k from "../layers/TiedEmbedding.js";
 import w from "../layers/RoPECache.js";
 import E from "../layers/RMSNorm.js";
-import { t as l, k as u } from "../index-DOvlwCh-.js";
+import { t as l, k as u } from "../index-Duu1Lvvv.js";
 import C from "./model.js";
 import P from "../layers/PositionEmbedding.js";
 import { packingSupported as _ } from "../utilities/packed.js";
-import { p as y, u as M } from "../pack16-nQ6JaLo-.js";
+import { p as y, u as M } from "../pack16-atD0eYRm.js";
 class I extends C {
   wte;
   // Token embeddings
@@ -1,23 +1,23 @@
 import m from "../layers/BaseLayer.js";
-import "../index-DOvlwCh-.js";
-import "../random_width-or-CEftb.js";
-import "../zeros_like-DvE73F4e.js";
+import "../index-Duu1Lvvv.js";
+import "../random_width-BN4wGJaW.js";
+import "../zeros_like-De4n1C3m.js";
 import "../Generator.js";
 import "../index-Cp39cXWe.js";
-import "../dataset-BcwmTGYc.js";
+import "../dataset-CVIJu7Xa.js";
 import "../ops/cpu/attentionMask.js";
 import "../ops/webgl/attentionMask.js";
 import "../ops/grads/attentionMask.js";
 import "../ops/cpu/rope.js";
 import "../ops/webgl/rope.js";
-import "../rope-Ir4mTyD1.js";
+import "../rope-DJ7Y7c-u.js";
 import "../ops/cpu/appendCache.js";
 import "../ops/webgl/appendCache.js";
 import "../ops/grads/softmax16.js";
-import "../matMul16-BWRSOCWB.js";
+import "../matMul16-xswmhSuF.js";
 import "../ops/webgl/matMul16.js";
 import "../ops/cpu/matMul16.js";
-import "../pack16-nQ6JaLo-.js";
+import "../pack16-atD0eYRm.js";
 import "../ops/transpose16.js";
 import "../ops/reshape16.js";
 import "../ops/cpu/qkv.js";
@@ -40,11 +40,11 @@ import "../ops/webgl/scatterSub.js";
 import "../ops/cpu/gatherSub.js";
 import "../ops/webgl/gatherSub.js";
 import "../ops/cpu/matMulGelu.js";
-import "../matMulGelu-CzfgT6Wq.js";
+import "../matMulGelu-BpvgnYG8.js";
 import "../ops/grads/matMulGelu.js";
 import "../ops/cpu/gelu.js";
 import "../ops/webgl/gelu.js";
-import "../gelu-CjNPL4OH.js";
+import "../gelu-BK1k-n1i.js";
 import "../ops/webgl/log.js";
 import "../checks/normRMS.js";
 import "../checks/normRMSGrad.js";
@@ -1,6 +1,6 @@
-import { C as n, _ as t, h as m, E as i } from "./index-DOvlwCh-.js";
-import { c as f } from "./complex-DjxcVmoX.js";
-import { z as c } from "./zeros-KnWaWf-X.js";
+import { u as n, V as t, U as m, E as i } from "./index-Duu1Lvvv.js";
+import { c as f } from "./complex-Cyg-eQeZ.js";
+import { z as c } from "./zeros-5YROwwUH.js";
 function l(o, r = "float32") {
   if (n(o), r === "complex64") {
     const s = l(o, "float32"), a = c(o, "float32");
@@ -1,4 +1,4 @@
-import { e as i } from "../index-DOvlwCh-.js";
+import { e as i } from "../index-Duu1Lvvv.js";
 import "./cpu/adamAdjust.js";
 import "./webgl/adamAdjust.js";
 function p(r, t, e, n, m, o) {
@@ -1,4 +1,4 @@
-import { e as t } from "../index-DOvlwCh-.js";
+import { e as t } from "../index-Duu1Lvvv.js";
 import "./cpu/adamMoments.js";
 import "./webgl/adamMoments.js";
 function s(e, n, r, m, o) {
package/dist/ops/add16.js CHANGED
@@ -1,4 +1,4 @@
-import { x as t, e as o } from "../index-DOvlwCh-.js";
+import { l as t, e as o } from "../index-Duu1Lvvv.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 import "./grads/add16.js";
 function m(r, e) {
@@ -1,9 +1,9 @@
-import { e as a } from "../index-DOvlwCh-.js";
+import { e as a } from "../index-Duu1Lvvv.js";
 import "./cpu/appendCache.js";
 import "./webgl/appendCache.js";
 import { isPackedTensor as c } from "../utilities/packed.js";
-import { c as t } from "../concat-BV8bt5H-.js";
-import { z as f } from "../zeros-KnWaWf-X.js";
+import { c as t } from "../concat-CSm2rMwe.js";
+import { z as f } from "../zeros-5YROwwUH.js";
 function C(r, o, n, p) {
   if (!p) {
     const e = r.shape[2], s = c(r);
@@ -1,4 +1,4 @@
-import { e as r } from "../index-DOvlwCh-.js";
+import { e as r } from "../index-Duu1Lvvv.js";
 import "./cpu/attentionMask.js";
 import "./webgl/attentionMask.js";
 import "./grads/attentionMask.js";
@@ -1,6 +1,6 @@
 import { isPackedTensor as o } from "../utilities/packed.js";
-import { e } from "../index-DOvlwCh-.js";
-import { c } from "../concat-BV8bt5H-.js";
+import { e } from "../index-Duu1Lvvv.js";
+import { c } from "../concat-CSm2rMwe.js";
 function p(r, n) {
   return o(r[0]) ? e().runKernel("Concat16", r, { axis: n ?? -1 }) : c(r, n);
 }
@@ -1,18 +1,18 @@
-import { p as k, w as t, x as i, m as w, y as z } from "../../index-DOvlwCh-.js";
-function A(c) {
-  const { moments: s, value: r } = c.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = c.attrs, e = s.shape.length, a = new Array(e).fill(0), n = s.shape.slice();
-  n[e - 1] = 1;
+import { f as k, j as t, l as i, m as z, n as A } from "../../index-Duu1Lvvv.js";
+function C(c) {
+  const { moments: n, value: r } = c.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = c.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
+  s[e - 1] = 1;
   const o = a.slice();
   o[e - 1] = 1;
-  const p = n.slice(), b = s.slice(a, n).squeeze([e - 1]), M = s.slice(o, p).squeeze([e - 1]), g = t(b, l), f = t(M, m);
+  const b = s.slice(), p = n.slice(a, s).squeeze([e - 1]), M = n.slice(o, b).squeeze([e - 1]), f = t(p, l), g = t(M, m);
   return i(
-    w(t(g, i(z(f), u ?? 1e-8)), -d),
+    z(t(f, i(A(g), u ?? 1e-8)), -d),
     r
   );
 }
-const C = {
+const h = {
   kernelName: "AdamAdjust",
   backendName: "cpu",
-  kernelFunc: A
+  kernelFunc: C
 };
-k(C);
+k(h);
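The AdamAdjust kernel above reads the first and second Adam moments out of a single tensor packed along its final axis, then folds them into an additive delta on the parameter value. The exact math-op mapping for the minified t/i/z/A helpers is not recoverable from the diff alone, but the unpacking step is. A hedged sketch of that step, with assumed names:

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch: split a moments tensor of shape [..., 2] (first moment
// in channel 0, second moment in channel 1) the way the kernel does.
function splitMoments(moments: tf.Tensor): [tf.Tensor, tf.Tensor] {
  const rank = moments.shape.length;
  const begin = new Array(rank).fill(0);
  const size = moments.shape.slice();
  size[rank - 1] = 1;            // take one channel of the last axis
  const beginV = begin.slice();
  beginV[rank - 1] = 1;          // second channel starts at index 1
  const m = moments.slice(begin, size).squeeze([rank - 1]);
  const v = moments.slice(beginV, size).squeeze([rank - 1]);
  return [m, v];
}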
@@ -1,16 +1,16 @@
-import { p } from "../../index-DOvlwCh-.js";
-import { s as b } from "../../stack-Cf4n9h0N.js";
-function f(t) {
+import { f as p } from "../../index-Duu1Lvvv.js";
+import { s as f } from "../../stack-CDWShFHF.js";
+function b(t) {
   const { moments: n, gradient: c } = t.inputs, { beta1: o, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
   s[e - 1] = 1;
   const i = a.slice();
   i[e - 1] = 1;
   const r = s.slice(), l = n.slice(a, s).squeeze([e - 1]), u = n.slice(i, r).squeeze([e - 1]), M = l.mul(o).add(c.mul(1 - o)), d = u.mul(m).add(c.square().mul(1 - m));
-  return b([M, d], -1);
+  return f([M, d], -1);
 }
 const g = {
   kernelName: "AdamMoments",
   backendName: "cpu",
-  kernelFunc: f
+  kernelFunc: b
 };
 p(g);
@@ -1,13 +1,13 @@
-import { p as d } from "../../index-DOvlwCh-.js";
-import { c as h } from "../../concat-BV8bt5H-.js";
+import { f as d } from "../../index-Duu1Lvvv.js";
+import { c as h } from "../../concat-CSm2rMwe.js";
 function u(p) {
   const { cache: n, item: s } = p.inputs, { maxSize: i, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], a = n.shape[3], e = s.shape[2];
   if (c + e <= i) {
-    const f = n.slice([0, 0, 0, 0], [t, o, c, a]), m = n.slice([0, 0, c + e, 0], [t, o, i - c - e, a]), r = e < e ? s.slice([0, 0, 0, 0], [t, o, e, a]) : s, k = h([f, r, m], 2);
-    return f.dispose(), m.dispose(), r !== s && r.dispose(), k;
+    const l = n.slice([0, 0, 0, 0], [t, o, c, a]), m = n.slice([0, 0, c + e, 0], [t, o, i - c - e, a]), r = e < e ? s.slice([0, 0, 0, 0], [t, o, e, a]) : s, k = h([l, r, m], 2);
+    return l.dispose(), m.dispose(), r !== s && r.dispose(), k;
   }
-  const l = n.slice([0, 0, e, 0], [t, o, i - e, a]), C = h([l, s], 2);
-  return l.dispose(), C;
+  const f = n.slice([0, 0, e, 0], [t, o, i - e, a]), C = h([f, s], 2);
+  return f.dispose(), C;
 }
 const w = {
   kernelName: "AppendCache",
@@ -1,22 +1,22 @@
-import { p as o, q as d, b as u } from "../../index-DOvlwCh-.js";
-import { l as N } from "../../ops-B5yanEdW.js";
-import { o as b } from "../../ones-D2rT0xk2.js";
-import { z as A } from "../../zeros-KnWaWf-X.js";
-import { w as I } from "../../resize_nearest_neighbor-B19mCEg2.js";
-import { m as g } from "../../mat_mul-SjpJRLyL.js";
+import { f as o, h as d, b as u } from "../../index-Duu1Lvvv.js";
+import { l as N } from "../../ops-C2_OXuZ4.js";
+import { o as b } from "../../ones-CBI1AQjb.js";
+import { z as A } from "../../zeros-5YROwwUH.js";
+import { w as I } from "../../resize_nearest_neighbor-BA_BX-ub.js";
+import { m as g } from "../../mat_mul-Bn2BDpT4.js";
 function a(n) {
   const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(b([t, t]), -1, 0).cast("bool"), i = A([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = I(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
   return k.add(p);
 }
-const w = {
+const h = {
   kernelName: "AttentionMask",
   backendName: "cpu",
   kernelFunc: a
 };
-o(w);
-const M = {
+o(h);
+const w = {
   kernelName: "AttentionMask",
   backendName: "tensorflow",
   kernelFunc: a
 };
-o(M);
+o(w);
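Deminified, the AttentionMask kernel above builds a causal mask (0 on and below the diagonal, -Infinity above) and adds it to the scaled q·kᵀ scores; since the divisor attr is applied multiplicatively, it is presumably passed as 1/√d. A hedged sketch in public tfjs API, with the single-letter names mapped by assumption (N.bandPart → tf.linalg.bandPart, I → tf.where, g → matMul):

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch of the AttentionMask CPU kernel above.
function attentionMask(q: tf.Tensor4D, k: tf.Tensor4D, divisor: number): tf.Tensor4D {
  const qLen = q.shape[2];
  const kLen = k.shape[2];
  // Lower-triangular ones -> boolean causal mask.
  const lower = tf.linalg.bandPart(tf.ones([kLen, kLen]), -1, 0).cast('bool');
  // 0 where attending is allowed, -inf where it is not.
  const mask = tf.where(lower, tf.zeros([kLen, kLen]), tf.fill([kLen, kLen], Number.NEGATIVE_INFINITY));
  const scores = tf.matMul(q, k, false, true).mul(divisor);
  const window = mask.slice([0, 0], [qLen, kLen]).expandDims(0).expandDims(0);
  return scores.add(window) as tf.Tensor4D;
}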
@@ -1,17 +1,17 @@
-import { p as e } from "../../index-DOvlwCh-.js";
-import { s as m } from "../../softmax-CA5jFsLR.js";
+import { f as e } from "../../index-Duu1Lvvv.js";
+import { s as f } from "../../softmax-DfuYyjMh.js";
 function n(t) {
   const { inputs: s, attrs: a } = t, { logits: o } = s, { dim: i, dropoutRate: r } = a;
   if (!o)
     throw new Error("Error in softmax: input logits is null");
-  return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), m(o, i);
+  return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), f(o, i);
 }
-const f = {
+const m = {
   kernelName: "FusedSoftmax",
   backendName: "cpu",
   kernelFunc: n
 };
-e(f);
+e(m);
 const u = {
   kernelName: "FusedSoftmax",
   backendName: "tensorflow",
@@ -1,18 +1,18 @@
-import { A as u, B as c, E as g, aj as p, p as h, c as m } from "../../index-DOvlwCh-.js";
-import { r as l } from "../../range-BklejeeW.js";
-import { s as N } from "../../stack-Cf4n9h0N.js";
-function f(e, t) {
+import { o as u, q as c, E as g, aj as h, f as m, c as p } from "../../index-Duu1Lvvv.js";
+import { r as f } from "../../range-DKmP1-OQ.js";
+import { s as l } from "../../stack-CDWShFHF.js";
+function N(e, t) {
   const n = c(t, "indices", "gatherND", "int32"), s = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
-  return g.runKernel(p, s);
+  return g.runKernel(h, s);
 }
-const b = /* @__PURE__ */ u({ gatherND_: f });
+const b = /* @__PURE__ */ u({ gatherND_: N });
 function d(e) {
-  const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = l(0, s, 1, "int32"), i = N([a, n], 1), o = b(r, i);
-  return m(t, o);
+  const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = f(0, s, 1, "int32"), o = l([a, n], 1), i = b(r, o);
+  return p(t, i);
 }
 const k = {
   kernelName: "EfficientGatherSub",
   backendName: "cpu",
   kernelFunc: d
 };
-m(k);
+m(k);
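EfficientGatherSub gathers logits[i, labels[i]] with gatherND and subtracts that from `values`, the forward companion to the EfficientScatterSub gradient further down. A hedged sketch with assumed names:

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch of the EfficientGatherSub CPU kernel above.
function gatherSub(values: tf.Tensor1D, labels: tf.Tensor1D, logits: tf.Tensor2D): tf.Tensor1D {
  const n = labels.shape[0];
  const rows = tf.range(0, n, 1, 'int32');
  const indices = tf.stack([rows, labels], 1); // [n, 2] pairs (row, label)
  const picked = tf.gatherND(logits, indices); // logits[i, labels[i]]
  return values.sub(picked) as tf.Tensor1D;
}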
@@ -1,4 +1,4 @@
-import { p as t, t as d } from "../../index-DOvlwCh-.js";
+import { f as t, t as d } from "../../index-Duu1Lvvv.js";
 const o = 0.7978845608028654, c = 0.044715;
 function m(r) {
   const { inputs: u } = r, { x: n } = u, e = n;
@@ -7,12 +7,12 @@ function m(r) {
     return e.mul(s);
   });
 }
-const N = {
+const p = {
   kernelName: "Gelu",
   backendName: "cpu",
   kernelFunc: m
 };
-t(N);
+t(p);
 const K = {
   kernelName: "Gelu",
   backendName: "tensorflow",
@@ -22,8 +22,8 @@ t(K);
 function i(r) {
   const { dy: u, x: n } = r.inputs;
   return d(() => {
-    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
-    return u.mul(p);
+    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), N = g.add(G);
+    return u.mul(N);
   });
 }
 const x = {
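The constants o = 0.7978845608028654 = √(2/π) and c = 0.044715 identify this as the tanh approximation of GELU: gelu(x) = 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))). A scalar sketch of what the forward and gradient kernels compute, for readers decoding the minified chains:

const SQRT_2_OVER_PI = 0.7978845608028654; // o in the diff
const GELU_C = 0.044715;                   // c in the diff

function gelu(x: number): number {
  const t = Math.tanh(SQRT_2_OVER_PI * (x + GELU_C * x ** 3));
  return 0.5 * x * (1 + t);
}

// Gradient, matching the kernel: with u = sqrt(2/pi)*(x + c*x^3),
// d/dx gelu = 0.5*(1 + tanh(u)) + 0.5*x*sech^2(u)*sqrt(2/pi)*(1 + 3c*x^2).
function dGelu(x: number): number {
  const u = SQRT_2_OVER_PI * (x + GELU_C * x ** 3);
  const t = Math.tanh(u);
  const sech2 = 1 - t * t;              // f = 1 - tanh(u)^2
  const inner = 1 + 3 * GELU_C * x * x; // k = 1 + 3c*x^2
  return 0.5 * (1 + t) + 0.5 * x * sech2 * SQRT_2_OVER_PI * inner;
}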
@@ -1,6 +1,6 @@
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { p } from "../../index-DOvlwCh-.js";
-import { m as l } from "../../mat_mul-SjpJRLyL.js";
+import { f as p } from "../../index-Duu1Lvvv.js";
+import { m as l } from "../../mat_mul-Bn2BDpT4.js";
 function m(e) {
   const { A: n, B: r } = e.inputs, { transposeA: o, transposeB: s } = e.attrs, a = !t(n), c = !t(r);
   if (a && c)
@@ -1,6 +1,6 @@
-import { p as e, t as m } from "../../index-DOvlwCh-.js";
-import { g as M, d as i } from "../../gelu-CjNPL4OH.js";
-import { m as k } from "../../mat_mul-SjpJRLyL.js";
+import { f as e, t as m } from "../../index-Duu1Lvvv.js";
+import { g as M, d as i } from "../../gelu-BK1k-n1i.js";
+import { m as k } from "../../mat_mul-Bn2BDpT4.js";
 function c(t) {
   const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
   return m(() => {
@@ -1,20 +1,20 @@
-import { p as e, t as i } from "../../index-DOvlwCh-.js";
+import { f as e, t as i } from "../../index-Duu1Lvvv.js";
 function n(t) {
   const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
   return i(() => m.matMul(k, o, s).mul(M));
 }
-const p = {
+const f = {
   kernelName: "MatMulMul",
   backendName: "cpu",
   kernelFunc: n
 };
-e(p);
-const f = {
+e(f);
+const p = {
   kernelName: "MatMulMul",
   backendName: "tensorflow",
   kernelFunc: n
 };
-e(f);
+e(p);
 const g = {
   kernelName: "MatMulMul",
   backendName: "webgpu",
@@ -1,4 +1,4 @@
-import { p as e, m as t } from "../../index-DOvlwCh-.js";
+import { f as e, m as t } from "../../index-Duu1Lvvv.js";
 function n(o) {
   const { inputs: r } = o, { a: l, b: u } = r;
   return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
@@ -1,4 +1,4 @@
-import { p as o, t as d } from "../../index-DOvlwCh-.js";
+import { f as o, t as d } from "../../index-Duu1Lvvv.js";
 function i(t) {
   const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
   return d(() => {
@@ -6,12 +6,12 @@ function i(t) {
     return r.mul(u).mul(a);
   });
 }
-const f = {
+const k = {
   kernelName: "RMSNorm",
   backendName: "cpu",
   kernelFunc: i
 };
-o(f);
+o(k);
 const g = {
   kernelName: "RMSNorm",
   backendName: "tensorflow",
@@ -21,8 +21,8 @@ o(g);
 function N(t) {
   const { dy: e, x: n, gamma: s } = t.inputs;
   return d(() => {
-    const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), k = c.mul(n).sum(-1, !0).div(r);
-    return [c.mul(m).sub(n.mul(k).mul(m).div(a.add(1e-8))), l];
+    const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
+    return [c.mul(m).sub(n.mul(f).mul(m).div(a.add(1e-8))), l];
   });
 }
 const S = {
@@ -31,9 +31,9 @@ const S = {
   kernelFunc: N
 };
 o(S);
-const p = {
+const R = {
   kernelName: "RMSNormGrad",
   backendName: "tensorflow",
   kernelFunc: N
 };
-o(p);
+o(R);
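From the gradient kernel above, the RMSNorm forward pass is recoverable: normalise by the root-mean-square over the feature axis (with a 1e-8 epsilon) and scale by gamma. A minimal sketch in public tfjs API (function name assumed):

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch of the RMSNorm forward computed by the kernel above:
// y = x * rsqrt(mean(x^2, lastAxis) + 1e-8) * gamma
function rmsNorm(x: tf.Tensor, gamma: tf.Tensor): tf.Tensor {
  const meanSq = x.square().mean(-1, true); // mean of squares, keepDims
  return x.mul(meanSq.add(1e-8).rsqrt()).mul(gamma);
}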
@@ -1,6 +1,6 @@
-import { p as q } from "../../index-DOvlwCh-.js";
-import { r as o } from "../../reshape-ByE68wS9.js";
-import { s as x } from "../../split-CVLc0w--.js";
+import { f as q } from "../../index-Duu1Lvvv.js";
+import { r as o } from "../../reshape-BI0yzp1T.js";
+import { s as x } from "../../split-BYrLboMq.js";
 function v(p) {
   const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
   if (C)
@@ -1,8 +1,8 @@
-import { p as I } from "../../index-DOvlwCh-.js";
-import { r as y } from "../../range-BklejeeW.js";
-import { g as F } from "../../gather-D3JcZUaI.js";
-import { s as E } from "../../stack-Cf4n9h0N.js";
-import { c as T } from "../../concat-BV8bt5H-.js";
+import { f as I } from "../../index-Duu1Lvvv.js";
+import { r as y } from "../../range-DKmP1-OQ.js";
+import { g as F } from "../../gather-BSULDalH.js";
+import { s as E } from "../../stack-CDWShFHF.js";
+import { c as T } from "../../concat-CSm2rMwe.js";
 function U(c, r, p, e, n) {
   const t = e.shape[3], s = p;
   if (s > t) return e;
@@ -1,8 +1,8 @@
-import { A as f, C as g, B as r, E as l, ai as N, p as b, c as S, m as h } from "../../index-DOvlwCh-.js";
-import { v as D } from "../../scatter_nd_util-lvSiX8q4.js";
-import { r as k } from "../../range-BklejeeW.js";
-import { s as v } from "../../stack-Cf4n9h0N.js";
-import { o as E } from "../../ones-D2rT0xk2.js";
+import { o as f, u as g, q as r, E as l, ai as N, f as b, c as S, m as h } from "../../index-Duu1Lvvv.js";
+import { v as D } from "../../scatter_nd_util-k9MUVUkn.js";
+import { r as k } from "../../range-DKmP1-OQ.js";
+import { s as v } from "../../stack-CDWShFHF.js";
+import { o as E } from "../../ones-CBI1AQjb.js";
 function I(a, e, s) {
   g(s);
   const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
@@ -10,14 +10,14 @@ function I(a, e, s) {
   const c = { indices: n, updates: t }, o = { shape: s };
   return l.runKernel(N, c, o);
 }
-const C = /* @__PURE__ */ f({ scatterND_: I });
-function K(a) {
-  const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = C(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
+const K = /* @__PURE__ */ f({ scatterND_: I });
+function L(a) {
+  const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = K(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
   return h(p, m);
 }
-const L = {
+const T = {
   kernelName: "EfficientScatterSub",
   backendName: "cpu",
-  kernelFunc: K
+  kernelFunc: L
 };
-b(L);
+b(T);
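EfficientScatterSub builds a one-hot of the labels via scatterND, subtracts it from the logits, and scales by dy along the batch axis, which is the familiar shape of a sparse cross-entropy gradient. A hedged sketch with assumed names:

import * as tf from '@tensorflow/tfjs-core';

// Hedged sketch of the EfficientScatterSub CPU kernel above.
function scatterSub(logits: tf.Tensor2D, labels: tf.Tensor1D, dy: tf.Tensor1D): tf.Tensor2D {
  const n = labels.shape[0];
  const classes = logits.shape[1];
  const rows = tf.range(0, n, 1, 'int32');
  const indices = tf.stack([rows, labels], 1);                 // [n, 2]
  const oneHot = tf.scatterND(indices, tf.ones([n]), [n, classes]);
  return logits.sub(oneHot).mul(dy.reshape([n, 1])) as tf.Tensor2D;
}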
package/dist/ops/dot16.js CHANGED
@@ -1,8 +1,8 @@
-import { b as d } from "../matMul16-BWRSOCWB.js";
+import { b as d } from "../matMul16-xswmhSuF.js";
 import { transpose16 as w } from "./transpose16.js";
 import { reshape16 as n } from "./reshape16.js";
 import { isPackedTensor as p } from "../utilities/packed.js";
-import { d as x } from "../tfjs_backend-y1cvNhLA.js";
+import { d as x } from "../tfjs_backend-806hyYve.js";
 function E(e, s, h = !1, c = !1) {
   if (!p(e) && !p(s))
     return x(e, s);
@@ -1,4 +1,4 @@
-import { e as n } from "../index-DOvlwCh-.js";
+import { e as n } from "../index-Duu1Lvvv.js";
 import "./cpu/gatherSub.js";
 import "./webgl/gatherSub.js";
 function f(r, e, t) {
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
-import "../index-DOvlwCh-.js";
+import "../index-Duu1Lvvv.js";
 import "./cpu/gelu.js";
 import "./webgl/gelu.js";
-import { d as e, g as i } from "../gelu-CjNPL4OH.js";
+import { d as e, g as i } from "../gelu-BK1k-n1i.js";
 export {
   e as dGelu,
   i as gelu
@@ -1,11 +1,11 @@
-import { u as i, a3 as h, a4 as d } from "../../index-DOvlwCh-.js";
+import { i as u, _ as h, $ as d } from "../../index-Duu1Lvvv.js";
 import { sum16 as c } from "../sum16.js";
 import { reshape16 as p } from "../reshape16.js";
 const A = {
   kernelName: "Add16",
   inputsToSave: ["a", "b"],
-  gradFunc: (s, u) => {
-    const [t, a] = u, n = h(t.shape, a.shape);
+  gradFunc: (s, i) => {
+    const [t, a] = i, n = h(t.shape, a.shape);
     if (Array.isArray(s))
       throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
     return { a: () => {
@@ -23,4 +23,4 @@ const A = {
   } };
   }
 };
-i(A);
+u(A);
@@ -1,5 +1,5 @@
-import { u as m } from "../../index-DOvlwCh-.js";
-import { m as o } from "../../matMul16-BWRSOCWB.js";
+import { i as m } from "../../index-Duu1Lvvv.js";
+import { m as o } from "../../matMul16-xswmhSuF.js";
 import { transpose16 as c } from "../transpose16.js";
 const l = {
   kernelName: "AttentionMask",