@genai-fi/nanogpt 0.10.2 → 0.10.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (249)
  1. package/dist/Generator.js +11761 -171
  2. package/dist/{RealDiv-zz7FpkKX.js → RealDiv-KAPDe8zB.js} +23 -25
  3. package/dist/Reshape-BYkmUnAv.js +14 -0
  4. package/dist/{Reshape-CHdUjC72.js → Reshape-Zt6eb7yh.js} +18 -20
  5. package/dist/TeachableLLM.js +10 -11
  6. package/dist/{axis_util-BsIr9ZNu.js → axis_util-BaG7mf5A.js} +3 -3
  7. package/dist/backend.js +2 -2
  8. package/dist/{backend_util-B1XRLuq9.js → backend_util-RCe-rHaj.js} +72 -73
  9. package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-DE3ACOLx.js} +45 -47
  10. package/dist/broadcast_to-B3eYlZm7.js +28 -0
  11. package/dist/checks/appendCache.js +2 -2
  12. package/dist/checks/attentionMask.js +3 -3
  13. package/dist/checks/gelu.js +2 -2
  14. package/dist/checks/matMulGelu.js +7 -11
  15. package/dist/checks/normRMS.js +9 -9
  16. package/dist/checks/normRMSGrad.js +3 -3
  17. package/dist/checks/packUnpack.js +2 -2
  18. package/dist/checks/qkv.js +12 -13
  19. package/dist/checks/rope.js +2 -2
  20. package/dist/clip_by_value-BnO7-a88.js +12 -0
  21. package/dist/complex-DjxcVmoX.js +11 -0
  22. package/dist/concat-BV8bt5H-.js +17 -0
  23. package/dist/{concat_util-iBYIyuQe.js → concat_util-DpW8mL_l.js} +1 -1
  24. package/dist/{dataset-D2P7rHAw.js → dataset-BcwmTGYc.js} +137 -139
  25. package/dist/dropout-BcvN9JYi.js +92 -0
  26. package/dist/expand_dims-DT4tEPwA.js +11 -0
  27. package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-Hta_rEnm.js} +1 -1
  28. package/dist/floor-D5QdR_le.js +9 -0
  29. package/dist/gather-D3JcZUaI.js +9 -0
  30. package/dist/{gelu-Bmhopi0J.js → gelu-CjNPL4OH.js} +10 -11
  31. package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-DAOmgtXR.js} +841 -1015
  32. package/dist/{index-DRyE072i.js → index-BwexR4lA.js} +262 -263
  33. package/dist/index-DOvlwCh-.js +3520 -0
  34. package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CCzYdUZg.js} +130 -132
  35. package/dist/layers/BaseLayer.js +15 -16
  36. package/dist/layers/CausalSelfAttention.js +6 -6
  37. package/dist/layers/MLP.js +4 -4
  38. package/dist/layers/PositionEmbedding.js +7 -7
  39. package/dist/layers/RMSNorm.js +3 -3
  40. package/dist/layers/RoPECache.js +9 -9
  41. package/dist/layers/TiedEmbedding.js +6 -6
  42. package/dist/layers/TransformerBlock.js +1 -1
  43. package/dist/loader/loadTransformers.js +1 -1
  44. package/dist/loader/oldZipLoad.js +13 -14
  45. package/dist/log_sum_exp-ngO0-4pK.js +39 -0
  46. package/dist/main.js +49 -50
  47. package/dist/{matMul16-fEAJ4smh.js → matMul16-BWRSOCWB.js} +14 -15
  48. package/dist/matMulGelu-CzfgT6Wq.js +163 -0
  49. package/dist/mat_mul-SjpJRLyL.js +11 -0
  50. package/dist/mod-AnXEvvpo.js +11 -0
  51. package/dist/models/NanoGPTV1.js +2 -2
  52. package/dist/models/model.js +13 -14
  53. package/dist/ones-D2rT0xk2.js +14 -0
  54. package/dist/ops/adamAdjust.js +1 -1
  55. package/dist/ops/adamMoments.js +1 -1
  56. package/dist/ops/add16.js +1 -1
  57. package/dist/ops/appendCache.js +3 -3
  58. package/dist/ops/attentionMask.js +1 -1
  59. package/dist/ops/concat16.js +2 -2
  60. package/dist/ops/cpu/adamAdjust.js +13 -14
  61. package/dist/ops/cpu/adamMoments.js +6 -7
  62. package/dist/ops/cpu/appendCache.js +7 -8
  63. package/dist/ops/cpu/attentionMask.js +7 -7
  64. package/dist/ops/cpu/fusedSoftmax.js +10 -11
  65. package/dist/ops/cpu/gatherSub.js +9 -10
  66. package/dist/ops/cpu/gelu.js +9 -10
  67. package/dist/ops/cpu/matMul16.js +6 -7
  68. package/dist/ops/cpu/matMulGelu.js +5 -6
  69. package/dist/ops/cpu/matMulMul.js +3 -4
  70. package/dist/ops/cpu/mulDropout.js +3 -4
  71. package/dist/ops/cpu/normRMS.js +10 -11
  72. package/dist/ops/cpu/qkv.js +8 -9
  73. package/dist/ops/cpu/rope.js +5 -6
  74. package/dist/ops/cpu/scatterSub.js +17 -19
  75. package/dist/ops/dot16.js +2 -2
  76. package/dist/ops/gatherSub.js +1 -1
  77. package/dist/ops/gelu.js +2 -2
  78. package/dist/ops/grads/add16.js +11 -12
  79. package/dist/ops/grads/attentionMask.js +5 -6
  80. package/dist/ops/grads/gelu.js +3 -4
  81. package/dist/ops/grads/matMul16.js +4 -5
  82. package/dist/ops/grads/matMulGelu.js +9 -10
  83. package/dist/ops/grads/normRMS.js +7 -8
  84. package/dist/ops/grads/pack16.js +4 -5
  85. package/dist/ops/grads/qkv.js +17 -19
  86. package/dist/ops/grads/rope.js +3 -5
  87. package/dist/ops/grads/softmax16.js +3 -4
  88. package/dist/ops/grads/unpack16.js +3 -4
  89. package/dist/ops/grads/utils.d.ts +1 -0
  90. package/dist/ops/grads/utils.js +8 -4
  91. package/dist/ops/matMul16.js +3 -3
  92. package/dist/ops/matMulGelu.js +2 -2
  93. package/dist/ops/matMulMul.js +1 -1
  94. package/dist/ops/mul16.js +1 -1
  95. package/dist/ops/mulDrop.js +1 -1
  96. package/dist/ops/normRMS.js +1 -1
  97. package/dist/ops/pack16.js +3 -4
  98. package/dist/ops/qkv.js +4 -8
  99. package/dist/ops/reshape16.js +14 -16
  100. package/dist/ops/rope.d.ts +1 -1
  101. package/dist/ops/rope.js +3 -8
  102. package/dist/ops/scatterSub.js +1 -1
  103. package/dist/ops/slice16.js +2 -2
  104. package/dist/ops/softmax16.js +5 -8
  105. package/dist/ops/sub16.js +1 -1
  106. package/dist/ops/sum16.js +2 -2
  107. package/dist/ops/transpose16.js +23 -24
  108. package/dist/ops/unpack16.js +2 -2
  109. package/dist/ops/webgl/adamAdjust.js +2 -3
  110. package/dist/ops/webgl/adamMoments.js +1 -2
  111. package/dist/ops/webgl/appendCache.js +1 -2
  112. package/dist/ops/webgl/attentionMask.js +4 -5
  113. package/dist/ops/webgl/fusedSoftmax.js +4 -6
  114. package/dist/ops/webgl/gatherSub.js +6 -7
  115. package/dist/ops/webgl/gelu.js +2 -3
  116. package/dist/ops/webgl/log.js +11 -12
  117. package/dist/ops/webgl/matMul16.js +10 -11
  118. package/dist/ops/webgl/matMulGelu.js +7 -111
  119. package/dist/ops/webgl/matMulMul.js +9 -10
  120. package/dist/ops/webgl/mulDropout.js +8 -9
  121. package/dist/ops/webgl/normRMS.js +2 -3
  122. package/dist/ops/webgl/qkv.js +5 -6
  123. package/dist/ops/webgl/rope.js +7 -8
  124. package/dist/ops/webgl/scatterSub.js +5 -6
  125. package/dist/ops/webgpu/adamAdjust.js +10 -12
  126. package/dist/ops/webgpu/adamMoments.js +8 -10
  127. package/dist/ops/webgpu/add16.js +8 -9
  128. package/dist/ops/webgpu/appendCache.js +23 -25
  129. package/dist/ops/webgpu/attentionMask.js +8 -10
  130. package/dist/ops/webgpu/attentionMask32_program.js +2 -2
  131. package/dist/ops/webgpu/concat16.js +12 -14
  132. package/dist/ops/webgpu/gatherSub.js +11 -13
  133. package/dist/ops/webgpu/gelu.js +28 -29
  134. package/dist/ops/webgpu/matMul16.js +26 -28
  135. package/dist/ops/webgpu/matMul16_program.js +4 -5
  136. package/dist/ops/webgpu/mul16.js +9 -10
  137. package/dist/ops/webgpu/normRMS.js +15 -17
  138. package/dist/ops/webgpu/normRMSGrad.js +21 -28
  139. package/dist/ops/webgpu/pack16.js +12 -13
  140. package/dist/ops/webgpu/pack16_program.js +2 -2
  141. package/dist/ops/webgpu/qkv.js +16 -18
  142. package/dist/ops/webgpu/rope.js +25 -27
  143. package/dist/ops/webgpu/scatterSub.js +7 -9
  144. package/dist/ops/webgpu/slice16.js +21 -23
  145. package/dist/ops/webgpu/softmax16.js +17 -19
  146. package/dist/ops/webgpu/softmax16_program.js +2 -2
  147. package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
  148. package/dist/ops/webgpu/softmax16grad.js +7 -8
  149. package/dist/ops/webgpu/sub16.js +7 -8
  150. package/dist/ops/webgpu/sum16.js +18 -20
  151. package/dist/ops/webgpu/transpose16.js +19 -20
  152. package/dist/ops/webgpu/transpose16_program.js +2 -2
  153. package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
  154. package/dist/ops/webgpu/unpack16.js +3 -4
  155. package/dist/ops/webgpu/utils/binary_op.js +7 -8
  156. package/dist/ops/webgpu/utils/reductions.js +14 -22
  157. package/dist/ops-B5yanEdW.js +476 -0
  158. package/dist/pack16-nQ6JaLo-.js +39 -0
  159. package/dist/patches/webgpu_backend.js +19 -20
  160. package/dist/patches/webgpu_base.js +1 -1
  161. package/dist/patches/webgpu_program.js +21 -22
  162. package/dist/{random_width-BVV9HveY.js → random_width-or-CEftb.js} +2506 -2761
  163. package/dist/range-BklejeeW.js +10 -0
  164. package/dist/relu-CP0ZcxWO.js +9 -0
  165. package/dist/reshape-ByE68wS9.js +9 -0
  166. package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
  167. package/dist/rope-Ir4mTyD1.js +24 -0
  168. package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-lvSiX8q4.js} +1 -1
  169. package/dist/selu_util-kbhpTdYD.js +44 -0
  170. package/dist/{shared-CHhxz-O5.js → shared-DT1TkE6w.js} +1 -1
  171. package/dist/{shared-D2NP_CpY.js → shared-dntlHIDQ.js} +343 -345
  172. package/dist/slice-BfEGSH82.js +12 -0
  173. package/dist/{slice_util-DyjSAD0u.js → slice_util-uTKwiEpW.js} +1 -1
  174. package/dist/{softmax-C9JQEtnO.js → softmax-CA5jFsLR.js} +4 -5
  175. package/dist/split-CVLc0w--.js +9 -0
  176. package/dist/squeeze-C7Z2srUo.js +10 -0
  177. package/dist/stack-Cf4n9h0N.js +11 -0
  178. package/dist/step-CINUs5QB.js +261 -0
  179. package/dist/sum-DWAtNGez.js +11 -0
  180. package/dist/tensor-DJoc7gJU.js +8 -0
  181. package/dist/tensor1d-D11P_7Dp.js +11 -0
  182. package/dist/{tensor2d-CSB4KOb0.js → tensor2d-Bs9wZRc7.js} +6 -7
  183. package/dist/{tensor4d-D7bLqGqz.js → tensor4d-BARPdTaS.js} +6 -7
  184. package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-y1cvNhLA.js} +255 -264
  185. package/dist/tile-mbfagpsB.js +11 -0
  186. package/dist/training/Adam.js +2 -2
  187. package/dist/training/AdamExt.js +1 -1
  188. package/dist/training/DatasetBuilder.js +2 -2
  189. package/dist/training/FullTrainer.js +1 -1
  190. package/dist/training/Trainer.js +2 -2
  191. package/dist/training/sparseCrossEntropy.js +5 -5
  192. package/dist/transpose-ClWiBS_b.js +36 -0
  193. package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
  194. package/dist/utilities/dummy.js +3 -3
  195. package/dist/utilities/multinomialCPU.js +2 -2
  196. package/dist/utilities/packed.d.ts +1 -4
  197. package/dist/utilities/packed.js +10 -745
  198. package/dist/utilities/performance.js +1 -1
  199. package/dist/utilities/profile.js +1 -1
  200. package/dist/utilities/safetensors.js +2 -2
  201. package/dist/utilities/sentences.js +5 -5
  202. package/dist/utilities/weights.js +2 -2
  203. package/dist/{variable-DzfrwYuP.js → variable-WawDEaAb.js} +1 -1
  204. package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-DuOXPQol.js} +178 -172
  205. package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-RxEF33Rj.js} +34 -35
  206. package/dist/zeros-KnWaWf-X.js +13 -0
  207. package/dist/zeros_like-DvE73F4e.js +721 -0
  208. package/package.json +4 -2
  209. package/dist/Reshape-CDVLyVfz.js +0 -16
  210. package/dist/broadcast_to-B0ChcDaz.js +0 -30
  211. package/dist/complex-BBiRlsVq.js +0 -13
  212. package/dist/concat-DmBLPVGC.js +0 -19
  213. package/dist/dropout-B1x1kYMa.js +0 -99
  214. package/dist/expand_dims-ouvfxQ1n.js +0 -13
  215. package/dist/gather-CH9sdacz.js +0 -10
  216. package/dist/index-D6Q1lPZO.js +0 -2157
  217. package/dist/log_sum_exp-D3ftBNY5.js +0 -41
  218. package/dist/mat_mul-C59XWcJd.js +0 -12
  219. package/dist/mod-DESSvHIU.js +0 -12
  220. package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
  221. package/dist/ones-jU9jlQvM.js +0 -15
  222. package/dist/ops-BFDtP6th.js +0 -645
  223. package/dist/pack16-CmVZs6af.js +0 -41
  224. package/dist/patches/PackedTensor.d.ts +0 -12
  225. package/dist/patches/PackedTensor.js +0 -11
  226. package/dist/patches/engine.d.ts +0 -261
  227. package/dist/patches/engine.js +0 -12
  228. package/dist/patches/tape.d.ts +0 -12
  229. package/dist/patches/tape.js +0 -5
  230. package/dist/range-ZZZD60Fx.js +0 -11
  231. package/dist/reciprocal-CrYlsAGD.js +0 -10
  232. package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
  233. package/dist/relu-BYDneVPn.js +0 -10
  234. package/dist/reshape-CaPQzFvz.js +0 -10
  235. package/dist/rope-s4W2XO9B.js +0 -32
  236. package/dist/selu_util-BGPXmd4B.js +0 -303
  237. package/dist/sin-Djs4aQiu.js +0 -16
  238. package/dist/slice-DvovR5wq.js +0 -13
  239. package/dist/split-DBck65sX.js +0 -10
  240. package/dist/squeeze-C00Ipm_7.js +0 -11
  241. package/dist/stack-ChnHwRpX.js +0 -13
  242. package/dist/sum-ywRJj3Zr.js +0 -12
  243. package/dist/tensor-0r5yOo2R.js +0 -8
  244. package/dist/tensor-CzmOBsdf.js +0 -909
  245. package/dist/tensor1d-BlUT89BP.js +0 -12
  246. package/dist/tensor_util-DfwaWayG.js +0 -523
  247. package/dist/tile-CR074jmp.js +0 -13
  248. package/dist/transpose-DH4gmHvu.js +0 -38
  249. package/dist/zeros-DBFVbpv5.js +0 -14
package/dist/loader/oldZipLoad.js CHANGED
@@ -1,22 +1,22 @@
- import "../utilities/packed.js";
- import { H as y } from "../index-D6Q1lPZO.js";
+ import { ae as y } from "../index-DOvlwCh-.js";
+ import "../random_width-or-CEftb.js";
+ import "../zeros_like-DvE73F4e.js";
+ import "../Generator.js";
+ import "../index-Cp39cXWe.js";
+ import "../dataset-BcwmTGYc.js";
  import "../ops/cpu/attentionMask.js";
  import "../ops/webgl/attentionMask.js";
  import "../ops/grads/attentionMask.js";
- import "../random_width-BVV9HveY.js";
- import "../register_all_kernels-nvj2k7OC.js";
- import "../index-Cp39cXWe.js";
- import "../dataset-D2P7rHAw.js";
  import "../ops/cpu/rope.js";
  import "../ops/webgl/rope.js";
- import "../rope-s4W2XO9B.js";
+ import "../rope-Ir4mTyD1.js";
  import "../ops/cpu/appendCache.js";
  import "../ops/webgl/appendCache.js";
  import "../ops/grads/softmax16.js";
- import "../matMul16-fEAJ4smh.js";
+ import "../matMul16-BWRSOCWB.js";
  import "../ops/webgl/matMul16.js";
  import "../ops/cpu/matMul16.js";
- import "../pack16-CmVZs6af.js";
+ import "../pack16-nQ6JaLo-.js";
  import "../ops/transpose16.js";
  import "../ops/reshape16.js";
  import "../ops/cpu/qkv.js";
@@ -35,7 +35,6 @@ import h from "../tokeniser/CharTokeniser.js";
  import k from "../tokeniser/bpe.js";
  import { dummyPassAsync as g } from "../utilities/dummy.js";
  import b from "../models/factory.js";
- import "../Generator.js";
  import "../index-DvYrXKkX.js";
  import "../ops/cpu/adamAdjust.js";
  import "../ops/webgl/adamAdjust.js";
@@ -43,16 +42,16 @@ import "../ops/cpu/adamMoments.js";
  import "../ops/webgl/adamMoments.js";
  import "../papaparse.min-C0cScC2i.js";
  import "../ops/cpu/matMulGelu.js";
- import "../ops/webgl/matMulGelu.js";
+ import "../matMulGelu-CzfgT6Wq.js";
  import "../ops/grads/matMulGelu.js";
  import "../ops/cpu/gelu.js";
  import "../ops/webgl/gelu.js";
- import "../gelu-Bmhopi0J.js";
+ import "../gelu-CjNPL4OH.js";
  import "../ops/webgl/log.js";
  import "../checks/normRMS.js";
  import "../checks/normRMSGrad.js";
  import { importWeights as u } from "../utilities/weights.js";
- async function Ot(r) {
+ async function Mt(r) {
  const e = /* @__PURE__ */ new Map(), p = await r.file("manifest.json")?.async("string");
  if (!p)
  throw new Error("Manifest file not found in the zip archive");
@@ -75,5 +74,5 @@ async function Ot(r) {
  return await g(m), m.loadWeights(c), { model: m, tokeniser: f };
  }
  export {
- Ot as default
+ Mt as default
  };
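Note: the only changes to this loader are rechunked imports and the minifier renaming the default export from Ot to Mt; the manifest handling itself is unchanged. For readers decoding the minified hunk, a rough deminified sketch of that check (names are illustrative; the zip argument is presumably a JSZip instance, since the bundle ships jszip.min-Bz5-11Bk.js):

    // Hypothetical deminified form of the manifest check above.
    async function loadFromZip(zip) {
      // JSZip: file() yields null for a missing entry; async("string") decodes the contents.
      const manifestText = await zip.file("manifest.json")?.async("string");
      if (!manifestText)
        throw new Error("Manifest file not found in the zip archive");
      return JSON.parse(manifestText);
      // ...the real loader goes on to build the model and tokeniser, then load weights...
    }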
package/dist/log_sum_exp-ngO0-4pK.js ADDED
@@ -0,0 +1,39 @@
+ import { A as r, B as x, E as p, am as E, an as $, ao as d, af as h, c as S, x as K } from "./index-DOvlwCh-.js";
+ import { e as _ } from "./axis_util-BaG7mf5A.js";
+ import { r as m } from "./reshape-ByE68wS9.js";
+ import { s as T } from "./sum-DWAtNGez.js";
+ function b(s, o = null, n = !1) {
+ const a = { x: x(s, "x", "max") }, e = { reductionIndices: o, keepDims: n };
+ return p.runKernel(E, a, e);
+ }
+ const A = /* @__PURE__ */ r({ max_: b });
+ function I(s) {
+ const n = { x: x(s, "x", "exp") };
+ return p.runKernel($, n);
+ }
+ const M = /* @__PURE__ */ r({ exp_: I });
+ function N(s) {
+ const n = { x: x(s, "x", "log", "float32") };
+ return p.runKernel(d, n);
+ }
+ const v = /* @__PURE__ */ r({ log_: N });
+ function w(s, o = null, n = !1) {
+ const t = x(s, "x", "logSumExp"), a = h(o, t.shape), e = A(
+ t,
+ a,
+ !0
+ /* keepDims */
+ ), l = S(t, e), i = M(l), f = T(i, a), u = v(f), c = K(m(e, u.shape), u);
+ if (n) {
+ const g = _(c.shape, a);
+ return m(c, g);
+ }
+ return c;
+ }
+ const P = /* @__PURE__ */ r({ logSumExp_: w });
+ export {
+ v as a,
+ M as e,
+ P as l,
+ A as m
+ };
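This new chunk is the numerically stable logSumExp reduction: it subtracts the per-axis max before exponentiating so exp cannot overflow, then adds the max back after the log. A minimal scalar sketch of the identity it implements:

    // logSumExp(x) = max(x) + log(sum(exp(x - max(x))))
    function logSumExp(values) {
      const max = Math.max(...values); // shift so the largest exponent is 0
      const sum = values.reduce((acc, v) => acc + Math.exp(v - max), 0);
      return max + Math.log(sum);
    }

    logSumExp([1000, 1000]); // ≈ 1000.6931, where naive exp(1000) would overflow to Infinity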
package/dist/main.js CHANGED
@@ -1,15 +1,18 @@
- import "./utilities/packed.js";
- import { default as ro } from "./models/NanoGPTV1.js";
- import { default as eo } from "./TeachableLLM.js";
- import { default as po } from "./tokeniser/CharTokeniser.js";
- import { default as ao } from "./tokeniser/bpe.js";
- import { default as fo } from "./utilities/waitForModel.js";
- import { default as no } from "./data/textLoader.js";
- import { default as uo } from "./Generator.js";
- import { default as To } from "./models/model.js";
- import { estimateMemoryUsage as go, estimateParameterCount as Mo, estimateResources as Po, estimateTrainingMemoryUsage as Co, validateConfig as Eo } from "./utilities/parameters.js";
- import { default as Bo } from "./utilities/topP.js";
- import "./index-D6Q1lPZO.js";
+ import "./index-DOvlwCh-.js";
+ import "./random_width-or-CEftb.js";
+ import "./zeros_like-DvE73F4e.js";
+ import { default as oo } from "./Generator.js";
+ import "./index-Cp39cXWe.js";
+ import "./dataset-BcwmTGYc.js";
+ import { default as to } from "./models/NanoGPTV1.js";
+ import { default as mo } from "./TeachableLLM.js";
+ import { default as io } from "./tokeniser/CharTokeniser.js";
+ import { default as so } from "./tokeniser/bpe.js";
+ import { default as lo } from "./utilities/waitForModel.js";
+ import { default as xo } from "./data/textLoader.js";
+ import { default as co } from "./models/model.js";
+ import { estimateMemoryUsage as ko, estimateParameterCount as go, estimateResources as Mo, estimateTrainingMemoryUsage as Po, validateConfig as Co } from "./utilities/parameters.js";
+ import { default as ho } from "./utilities/topP.js";
  import "./ops/cpu/scatterSub.js";
  import "./ops/webgl/scatterSub.js";
  import "./ops/cpu/gatherSub.js";
@@ -20,21 +23,17 @@ import "./ops/grads/attentionMask.js";
  import "./ops/cpu/qkv.js";
  import "./ops/webgl/qkv.js";
  import "./ops/grads/qkv.js";
- import "./random_width-BVV9HveY.js";
- import "./register_all_kernels-nvj2k7OC.js";
- import "./index-Cp39cXWe.js";
- import "./dataset-D2P7rHAw.js";
  import "./ops/cpu/rope.js";
  import "./ops/webgl/rope.js";
- import "./rope-s4W2XO9B.js";
+ import "./rope-Ir4mTyD1.js";
  import "./ops/cpu/appendCache.js";
  import "./ops/webgl/appendCache.js";
  import "./ops/cpu/matMulGelu.js";
- import "./ops/webgl/matMulGelu.js";
+ import "./matMulGelu-CzfgT6Wq.js";
  import "./ops/grads/matMulGelu.js";
  import "./ops/cpu/gelu.js";
  import "./ops/webgl/gelu.js";
- import "./gelu-Bmhopi0J.js";
+ import "./gelu-CjNPL4OH.js";
  import "./ops/cpu/normRMS.js";
  import "./ops/webgl/normRMS.js";
  import "./ops/grads/normRMS.js";
@@ -43,51 +42,51 @@ import "./ops/cpu/adamMoments.js";
  import "./ops/webgl/adamMoments.js";
  import "./ops/cpu/adamAdjust.js";
  import "./ops/webgl/adamAdjust.js";
- import { u as o, p as r } from "./pack16-CmVZs6af.js";
+ import { u as o, p as r } from "./pack16-nQ6JaLo-.js";
  import "./ops/grads/softmax16.js";
- import "./matMul16-fEAJ4smh.js";
+ import "./matMul16-BWRSOCWB.js";
  import "./ops/webgl/matMul16.js";
  import "./ops/cpu/matMul16.js";
  import "./ops/transpose16.js";
- import { selectBackend as yo } from "./backend.js";
- import { default as Ao } from "./utilities/performance.js";
+ import { selectBackend as bo } from "./backend.js";
+ import { default as Lo } from "./utilities/performance.js";
  import t from "./layers/CausalSelfAttention.js";
  import e from "./layers/MLP.js";
  import m from "./layers/TransformerBlock.js";
  import p from "./layers/RoPECache.js";
- import { default as Ro } from "./training/AdamExt.js";
- import { default as vo } from "./checks/index.js";
- import { sentenceEmbeddings as Do, sentenceEmbeddingsTensor as Fo } from "./utilities/sentences.js";
- const Z = {
+ import { default as Go } from "./training/AdamExt.js";
+ import { default as Uo } from "./checks/index.js";
+ import { sentenceEmbeddings as wo, sentenceEmbeddingsTensor as Do } from "./utilities/sentences.js";
+ const Y = {
  pack16: r,
  unpack16: o
- }, _ = {
+ }, Z = {
  CausalSelfAttention: t,
  MLP: e,
  TransformerBlock: m,
  RoPECache: p
  };
  export {
- Ro as AdamExt,
- ao as BPETokeniser,
- po as CharTokeniser,
- uo as Generator,
- To as Model,
- ro as NanoGPT,
- eo as TeachableLLM,
- vo as checks,
- go as estimateMemoryUsage,
- Mo as estimateParameterCount,
- Po as estimateResources,
- Co as estimateTrainingMemoryUsage,
- _ as layers,
- no as loadTextData,
- Z as ops,
- Ao as performanceTest,
- yo as selectBackend,
- Do as sentenceEmbeddings,
- Fo as sentenceEmbeddingsTensor,
- Bo as topP,
- Eo as validateConfig,
- fo as waitForModel
+ Go as AdamExt,
+ so as BPETokeniser,
+ io as CharTokeniser,
+ oo as Generator,
+ co as Model,
+ to as NanoGPT,
+ mo as TeachableLLM,
+ Uo as checks,
+ ko as estimateMemoryUsage,
+ go as estimateParameterCount,
+ Mo as estimateResources,
+ Po as estimateTrainingMemoryUsage,
+ Z as layers,
+ xo as loadTextData,
+ Y as ops,
+ Lo as performanceTest,
+ bo as selectBackend,
+ wo as sentenceEmbeddings,
+ Do as sentenceEmbeddingsTensor,
+ ho as topP,
+ Co as validateConfig,
+ lo as waitForModel
  };
package/dist/{matMul16-fEAJ4smh.js → matMul16-BWRSOCWB.js} RENAMED
@@ -1,13 +1,12 @@
- import { e as y } from "./index-D6Q1lPZO.js";
+ import { u as y, e as h } from "./index-DOvlwCh-.js";
  import "./ops/webgl/matMul16.js";
  import "./ops/cpu/matMul16.js";
- import { isPackedTensor as g, packTensor as k } from "./utilities/packed.js";
- import { p as v } from "./pack16-CmVZs6af.js";
- import { d as h } from "./gelu-Bmhopi0J.js";
+ import { isPackedTensor as g } from "./utilities/packed.js";
+ import { p as v } from "./pack16-nQ6JaLo-.js";
+ import { d as k } from "./gelu-CjNPL4OH.js";
  import { transpose16 as S } from "./ops/transpose16.js";
  import { reshape16 as w } from "./ops/reshape16.js";
- import { a as G } from "./tensor_util-DfwaWayG.js";
- const T = {
+ const G = {
  kernelName: "MatMul16",
  inputsToSave: ["A", "B"],
  outputsToSave: [],
@@ -30,7 +29,7 @@ const T = {
  }
  if (p === "gelu") {
  const u = e, m = l(s, t, f, i);
- e = h(u, m), u.dispose(), m.dispose();
+ e = k(u, m), u.dispose(), m.dispose();
  }
  if (!f && !i)
  return {
@@ -50,12 +49,12 @@
  throw new Error("Gradient for transposeA=true and transposeB=true is not supported yet.");
  }
  };
- G(T);
+ y(G);
  function l(r, o, n = !1, s = !1, t = {}) {
- const e = g(r), f = g(o), i = e || f, a = !i || e ? r : v(r), p = !i || f ? o : v(o), c = y().runKernel("MatMul16", { A: a, B: p }, { transposeA: n, transposeB: s, ...t });
- return i && !e && a.dispose(), i && !f && p.dispose(), i ? k(c) : c;
+ const e = g(r), f = g(o), i = e || f, a = !i || e ? r : v(r), p = !i || f ? o : v(o), c = h().runKernel("MatMul16", { A: a, B: p }, { transposeA: n, transposeB: s, ...t });
+ return i && !e && a.dispose(), i && !f && p.dispose(), c;
  }
- function j(r, o, n, s = !1, t = !1) {
+ function K(r, o, n, s = !1, t = !1) {
  return l(r, o, s, t, { scale: n });
  }
  function B(r, o, n, s = !1, t = !1) {
@@ -64,14 +63,14 @@ function B(r, o, n, s = !1, t = !1) {
  function M(r, o, n, s = !1, t = !1) {
  return l(r, o, s, t, { scaleB: n });
  }
- function q(r, o, n = !1, s = !1) {
+ function N(r, o, n = !1, s = !1) {
  return l(r, o, n, s, { activation: "gelu" });
  }
  export {
- T as a,
+ G as a,
  l as b,
- q as c,
+ N as c,
  B as d,
  M as e,
- j as m
+ K as m
  };
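The substantive change in this chunk, beyond the content-hash renames: the MatMul16 dispatcher no longer unpacks its result (the old return ... i ? k(c) : c became return ... c, and packTensor is no longer imported), so outputs now stay in the packed f16 representation for downstream packed ops. A deminified sketch of the dispatch, with illustrative names:

    // Hypothetical deminified form of function l above.
    function matMul16(a, b, transposeA = false, transposeB = false, attrs = {}) {
      const aPacked = isPackedTensor(a), bPacked = isPackedTensor(b);
      const anyPacked = aPacked || bPacked;
      // Pack whichever operand is still f32 so both sides match the kernel's layout.
      const A = !anyPacked || aPacked ? a : pack16(a);
      const B = !anyPacked || bPacked ? b : pack16(b);
      const out = engine().runKernel("MatMul16", { A, B }, { transposeA, transposeB, ...attrs });
      if (anyPacked && !aPacked) A.dispose(); // drop temporaries we packed ourselves
      if (anyPacked && !bPacked) B.dispose();
      return out; // 0.10.2 unpacked here when inputs were packed; 0.10.3 returns the packed result
    }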
package/dist/matMulGelu-CzfgT6Wq.js ADDED
@@ -0,0 +1,163 @@
+ import { p as C, t as R, e as I, h as G, a3 as L, l as F, ak as U } from "./index-DOvlwCh-.js";
+ import { r as M } from "./Reshape-Zt6eb7yh.js";
+ import { u as H } from "./gpgpu_math-DAOmgtXR.js";
+ import { m as B } from "./mat_mul-SjpJRLyL.js";
+ class W {
+ constructor(e, s, a, n = !1, o = !1, r = !1, i = null, u = !1, l = !1) {
+ this.variableNames = ["matrixA", "matrixB"], this.packedInputs = !0, this.packedOutput = !0, this.outputShape = a, this.enableShapeUniforms = H(this.outputShape.length);
+ const p = n ? e[1] : e[2], h = Math.ceil(p / 2), d = n ? "i * 2, rc.y" : "rc.y, i * 2", x = o ? "rc.z, i * 2" : "i * 2, rc.z", b = n ? ["a.xxyy", "a.zzww"] : ["a.xxzz", "a.yyww"], m = o ? ["b.xzxz", "b.ywyw"] : ["b.xyxy", "b.zwzw"];
+ let c = "", g = "";
+ i && (u ? c = `vec4 activation(vec4 a) {
+ vec4 b = getPreluActivationWeightsAtOutCoords();
+ ${i}
+ }` : l ? c = `vec4 activation(vec4 a) {
+ vec4 b = getLeakyreluAlphaAtOutCoords();
+ ${i}
+ }` : c = `vec4 activation(vec4 x) {
+ ${i}
+ }`, g = "result = activation(result);");
+ const $ = r ? "result += getBiasAtOutCoords();" : "";
+ r && this.variableNames.push("bias"), u && this.variableNames.push("preluActivationWeights"), l && this.variableNames.push("leakyreluAlpha");
+ let f = "rc.x", v = "rc.x";
+ e[0] < s[0] ? f = `imod(rc.x, ${e[0]})` : s[0] < e[0] && (v = `imod(rc.x, ${s[0]})`), this.userCode = `
+ ${c}
+ // Don't use uniform for sharedDimensionPacked for performance.
+ const float sharedDimension = ${h}.0;
+
+ vec4 dot2x2ARowBCol(ivec3 rc) {
+ vec4 result = vec4(0);
+ int batchA = ${f};
+ int batchB = ${v};
+ for (int i = 0; i < ${h}; i++) {
+ vec4 a = getMatrixA(batchA, ${d});
+ vec4 b = getMatrixB(batchB, ${x});
+
+ // These swizzled products need to be separately added.
+ // See: https://github.com/tensorflow/tfjs/issues/1735
+ result += (${b[0]} * ${m[0]});
+ result += (${b[1]} * ${m[1]});
+ }
+ return result;
+ }
+
+ void main() {
+ ivec3 rc = getOutputCoords();
+ vec4 result = dot2x2ARowBCol(rc);
+
+ ${$}
+
+ ${g}
+
+ setOutput(result);
+ }
+ `;
+ }
+ }
+ const S = 0.7978845608028654, w = 0.044715, j = `
+ vec4 x3 = x * x * x;
+ vec4 inner = x + ${w} * x3;
+ inner = ${S} * inner;
+ inner = vec4(
+ abs(inner[0]) > 15.0 ? sign(inner[0]) : tanh(inner[0]),
+ abs(inner[1]) > 15.0 ? sign(inner[1]) : tanh(inner[1]),
+ abs(inner[2]) > 15.0 ? sign(inner[2]) : tanh(inner[2]),
+ abs(inner[3]) > 15.0 ? sign(inner[3]) : tanh(inner[3])
+ );
+ inner = 0.5 * (1.0 + inner);
+ vec4 result = x * inner;
+ return result;
+ `, q = `
+ vec4 a2 = a * a;
+ vec4 a3 = a2 * a;
+ vec4 u = ${S} * (a + ${w} * a3);
+ vec4 t = vec4(
+ abs(u[0]) > 15.0 ? sign(u[0]) : tanh(u[0]),
+ abs(u[1]) > 15.0 ? sign(u[1]) : tanh(u[1]),
+ abs(u[2]) > 15.0 ? sign(u[2]) : tanh(u[2]),
+ abs(u[3]) > 15.0 ? sign(u[3]) : tanh(u[3])
+ );
+ vec4 sech2 = 1.0 - t * t;
+ vec4 du_dx = ${S} * (1.0 + 3.0 * ${w} * a2);
+ vec4 dgelu = 0.5 * (1.0 + t) + 0.5 * a * sech2 * du_dx;
+ return dgelu * b;
+ `, se = 1e3;
+ function O({
+ a: t,
+ b: e,
+ transposeA: s,
+ transposeB: a,
+ backend: n,
+ activationSnippet: o,
+ multiplier: r
+ }) {
+ const i = t.shape.length, u = e.shape.length, l = s ? t.shape[i - 2] : t.shape[i - 1], p = a ? e.shape[u - 1] : e.shape[u - 2], h = s ? t.shape[i - 1] : t.shape[i - 2], d = a ? e.shape[u - 2] : e.shape[u - 1], x = t.shape.slice(0, -2), b = e.shape.slice(0, -2), m = G(x), c = G(b), $ = L(t.shape.slice(0, -2), e.shape.slice(0, -2)).concat([h, d]);
+ F(
+ l === p,
+ () => `Error in matMul: inner shapes (${l}) and (${p}) of Tensors with shapes ${t.shape} and ${e.shape} and transposeA=${s} and transposeB=${a} must match.`
+ );
+ const f = s ? [m, l, h] : [m, h, l], v = a ? [c, d, p] : [c, p, d], A = M({ inputs: { x: t }, backend: n, attrs: { shape: f } }), y = M({ inputs: { x: e }, backend: n, attrs: { shape: v } }), k = [A, y], E = Math.max(m, c), N = o, T = U(t.dtype, e.dtype), _ = new W(
+ f,
+ v,
+ [E, h, d],
+ s,
+ a,
+ !1,
+ N,
+ !!r,
+ !1
+ ), D = [A, y];
+ r && D.push(r);
+ const z = n.runWebGLProgram(_, D, T), K = M({ inputs: { x: z }, backend: n, attrs: { shape: $ } });
+ k.push(z);
+ for (const P of k)
+ n.disposeIntermediateTensorInfo(P);
+ return K;
+ }
+ function J(t) {
+ const { inputs: e, backend: s } = t, { x: a, kernel: n } = e;
+ if (a === void 0 || n === void 0)
+ throw new Error("BatchMatMul requires two input tensors.");
+ return O({
+ a,
+ b: n,
+ transposeA: !1,
+ transposeB: !1,
+ backend: s,
+ activationSnippet: j
+ });
+ }
+ const Q = {
+ kernelName: "MatMulGelu",
+ backendName: "webgl",
+ kernelFunc: J
+ };
+ C(Q);
+ function V(t) {
+ const { dy: e, x: s, kernel: a } = t.inputs, n = t.backend;
+ return R(() => {
+ const o = I().makeTensorFromTensorInfo(
+ O({
+ a: s,
+ b: a,
+ transposeA: !1,
+ transposeB: !1,
+ backend: n,
+ activationSnippet: q,
+ multiplier: e
+ })
+ ), r = B(o, a, !1, !0), i = B(s, o, !0, !1);
+ return [r, i];
+ });
+ }
+ const X = {
+ kernelName: "MatMulGeluGrad",
+ backendName: "webgl",
+ kernelFunc: V
+ };
+ C(X);
+ export {
+ W as M,
+ se as a,
+ O as b,
+ J as c
+ };
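This new WebGL chunk fuses the GELU activation (and its gradient, via MatMulGeluGrad) directly into the packed matmul shader, replacing the separate ops/webgl/matMulGelu.js kernel that 0.10.2 imported. The constants 0.7978845608028654 (≈ √(2/π)) and 0.044715 are the usual tanh approximation of GELU; the shader clamps tanh to sign(x) once |x| > 15, where tanh has saturated anyway. A scalar reference for what the j snippet computes per element:

    // gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    const SQRT_2_OVER_PI = 0.7978845608028654;
    const GELU_COEF = 0.044715;

    function geluTanh(x) {
      const inner = SQRT_2_OVER_PI * (x + GELU_COEF * x * x * x);
      // Mirror the shader's clamp: for |inner| > 15, tanh(inner) is sign(inner) to float precision.
      const t = Math.abs(inner) > 15 ? Math.sign(inner) : Math.tanh(inner);
      return 0.5 * x * (1 + t);
    }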
package/dist/mat_mul-SjpJRLyL.js ADDED
@@ -0,0 +1,11 @@
+ import { A as m, B as s, L as M, E as c, M as p } from "./index-DOvlwCh-.js";
+ function f(e, n, o = !1, l = !1) {
+ let a = s(e, "a", "matMul"), t = s(n, "b", "matMul");
+ [a, t] = M(a, t);
+ const r = { a, b: t }, u = { transposeA: o, transposeB: l };
+ return c.runKernel(p, r, u);
+ }
+ const i = /* @__PURE__ */ m({ matMul_: f });
+ export {
+ i as m
+ };
package/dist/mod-AnXEvvpo.js ADDED
@@ -0,0 +1,11 @@
+ import { A as r, B as s, L as m, E as c, N as d } from "./index-DOvlwCh-.js";
+ function p(t, e) {
+ let o = s(t, "a", "mod"), a = s(e, "b", "mod");
+ [o, a] = m(o, a);
+ const n = { a: o, b: a };
+ return c.runKernel(d, n);
+ }
+ const b = /* @__PURE__ */ r({ mod_: p });
+ export {
+ b as m
+ };
package/dist/models/NanoGPTV1.js CHANGED
@@ -3,11 +3,11 @@ import b from "../layers/TransformerBlock.js";
  import k from "../layers/TiedEmbedding.js";
  import w from "../layers/RoPECache.js";
  import E from "../layers/RMSNorm.js";
- import { t as l, k as u } from "../index-D6Q1lPZO.js";
+ import { t as l, k as u } from "../index-DOvlwCh-.js";
  import C from "./model.js";
  import P from "../layers/PositionEmbedding.js";
  import { packingSupported as _ } from "../utilities/packed.js";
- import { p as y, u as M } from "../pack16-CmVZs6af.js";
+ import { p as y, u as M } from "../pack16-nQ6JaLo-.js";
  class I extends C {
  wte;
  // Token embeddings
1
1
  import m from "../layers/BaseLayer.js";
2
- import "../utilities/packed.js";
3
- import "../index-D6Q1lPZO.js";
2
+ import "../index-DOvlwCh-.js";
3
+ import "../random_width-or-CEftb.js";
4
+ import "../zeros_like-DvE73F4e.js";
5
+ import "../Generator.js";
6
+ import "../index-Cp39cXWe.js";
7
+ import "../dataset-BcwmTGYc.js";
4
8
  import "../ops/cpu/attentionMask.js";
5
9
  import "../ops/webgl/attentionMask.js";
6
10
  import "../ops/grads/attentionMask.js";
7
- import "../random_width-BVV9HveY.js";
8
- import "../register_all_kernels-nvj2k7OC.js";
9
- import "../index-Cp39cXWe.js";
10
- import "../dataset-D2P7rHAw.js";
11
11
  import "../ops/cpu/rope.js";
12
12
  import "../ops/webgl/rope.js";
13
- import "../rope-s4W2XO9B.js";
13
+ import "../rope-Ir4mTyD1.js";
14
14
  import "../ops/cpu/appendCache.js";
15
15
  import "../ops/webgl/appendCache.js";
16
16
  import "../ops/grads/softmax16.js";
17
- import "../matMul16-fEAJ4smh.js";
17
+ import "../matMul16-BWRSOCWB.js";
18
18
  import "../ops/webgl/matMul16.js";
19
19
  import "../ops/cpu/matMul16.js";
20
- import "../pack16-CmVZs6af.js";
20
+ import "../pack16-nQ6JaLo-.js";
21
21
  import "../ops/transpose16.js";
22
22
  import "../ops/reshape16.js";
23
23
  import "../ops/cpu/qkv.js";
@@ -29,7 +29,6 @@ import "../ops/grads/normRMS.js";
29
29
  import "../ops/grads/add16.js";
30
30
  import "../jszip.min-Bz5-11Bk.js";
31
31
  import "../index-DvYrXKkX.js";
32
- import "../Generator.js";
33
32
  import "../ops/cpu/adamAdjust.js";
34
33
  import "../ops/webgl/adamAdjust.js";
35
34
  import "../ops/cpu/adamMoments.js";
@@ -41,16 +40,16 @@ import "../ops/webgl/scatterSub.js";
41
40
  import "../ops/cpu/gatherSub.js";
42
41
  import "../ops/webgl/gatherSub.js";
43
42
  import "../ops/cpu/matMulGelu.js";
44
- import "../ops/webgl/matMulGelu.js";
43
+ import "../matMulGelu-CzfgT6Wq.js";
45
44
  import "../ops/grads/matMulGelu.js";
46
45
  import "../ops/cpu/gelu.js";
47
46
  import "../ops/webgl/gelu.js";
48
- import "../gelu-Bmhopi0J.js";
47
+ import "../gelu-CjNPL4OH.js";
49
48
  import "../ops/webgl/log.js";
50
49
  import "../checks/normRMS.js";
51
50
  import "../checks/normRMSGrad.js";
52
51
  import { createSoftmaxCrossEntropyWithGrad as s } from "../training/sparseCrossEntropy.js";
53
- class st extends m {
52
+ class et extends m {
54
53
  lossScaling = 128;
55
54
  trainingState = null;
56
55
  getNumParams() {
@@ -74,5 +73,5 @@ class st extends m {
74
73
  }
75
74
  }
76
75
  export {
77
- st as default
76
+ et as default
78
77
  };
package/dist/ones-D2rT0xk2.js ADDED
@@ -0,0 +1,14 @@
+ import { C as n, _ as t, h as m, E as i } from "./index-DOvlwCh-.js";
+ import { c as f } from "./complex-DjxcVmoX.js";
+ import { z as c } from "./zeros-KnWaWf-X.js";
+ function l(o, r = "float32") {
+ if (n(o), r === "complex64") {
+ const s = l(o, "float32"), a = c(o, "float32");
+ return f(s, a);
+ }
+ const e = t(m(o), r);
+ return i.makeTensor(e, o, r);
+ }
+ export {
+ l as o
+ };
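This chunk is a tree-shaken ones op: complex64 ones are assembled as complex(ones, zeros), and everything else is a filled flat buffer handed to the engine. A deminified sketch, where makeFilled stands in for the engine call visible above:

    // Hypothetical deminified form of function l above.
    function ones(shape, dtype = "float32") {
      if (dtype === "complex64") {
        // A complex tensor of ones: real parts 1, imaginary parts 0.
        return complex(ones(shape, "float32"), zeros(shape, "float32"));
      }
      return makeFilled(shape, 1, dtype); // i.e. engine.makeTensor over a 1-filled flat array
    }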
package/dist/ops/adamAdjust.js CHANGED
@@ -1,4 +1,4 @@
- import { e as i } from "../index-D6Q1lPZO.js";
+ import { e as i } from "../index-DOvlwCh-.js";
  import "./cpu/adamAdjust.js";
  import "./webgl/adamAdjust.js";
  function p(r, t, e, n, m, o) {
package/dist/ops/adamMoments.js CHANGED
@@ -1,4 +1,4 @@
- import { e as t } from "../index-D6Q1lPZO.js";
+ import { e as t } from "../index-DOvlwCh-.js";
  import "./cpu/adamMoments.js";
  import "./webgl/adamMoments.js";
  function s(e, n, r, m, o) {
package/dist/ops/add16.js CHANGED
@@ -1,4 +1,4 @@
- import { n as t, e as o } from "../index-D6Q1lPZO.js";
+ import { x as t, e as o } from "../index-DOvlwCh-.js";
  import { isPackedTensor as n } from "../utilities/packed.js";
  import "./grads/add16.js";
  function m(r, e) {
package/dist/ops/appendCache.js CHANGED
@@ -1,9 +1,9 @@
- import { e as a } from "../index-D6Q1lPZO.js";
+ import { e as a } from "../index-DOvlwCh-.js";
  import "./cpu/appendCache.js";
  import "./webgl/appendCache.js";
  import { isPackedTensor as c } from "../utilities/packed.js";
- import { c as t } from "../concat-DmBLPVGC.js";
- import { z as f } from "../zeros-DBFVbpv5.js";
+ import { c as t } from "../concat-BV8bt5H-.js";
+ import { z as f } from "../zeros-KnWaWf-X.js";
  function C(r, o, n, p) {
  if (!p) {
  const e = r.shape[2], s = c(r);
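Only import paths changed here; the op still appends new key/value entries to the attention cache by concatenating along the sequence axis (the r.shape[2] read visible above). A rough sketch of the pattern, with assumed names and an assumed [batch, heads, seq, headDim] layout:

    // Hypothetical sketch of a KV-cache append via concat along the sequence axis.
    function appendCache(cache, newKV) {
      // cache: [batch, heads, seqSoFar, headDim]; newKV: [batch, heads, seqNew, headDim]
      return concat([cache, newKV], 2); // grow the cache along axis 2 (sequence)
    }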
package/dist/ops/attentionMask.js CHANGED
@@ -1,4 +1,4 @@
- import { e as r } from "../index-D6Q1lPZO.js";
+ import { e as r } from "../index-DOvlwCh-.js";
  import "./cpu/attentionMask.js";
  import "./webgl/attentionMask.js";
  import "./grads/attentionMask.js";
package/dist/ops/concat16.js CHANGED
@@ -1,6 +1,6 @@
  import { isPackedTensor as o } from "../utilities/packed.js";
- import { e } from "../index-D6Q1lPZO.js";
- import { c } from "../concat-DmBLPVGC.js";
+ import { e } from "../index-DOvlwCh-.js";
+ import { c } from "../concat-BV8bt5H-.js";
  function p(r, n) {
  return o(r[0]) ? e().runKernel("Concat16", r, { axis: n ?? -1 }) : c(r, n);
  }
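The whole op is visible above; deminified, the dispatch reads: packed f16 inputs route to the custom Concat16 kernel, plain tensors fall through to the stock concat (the first tensor's packedness is taken as representative of the whole list):

    // Deminified form of function p above (names are illustrative).
    function concat16(tensors, axis) {
      return isPackedTensor(tensors[0])
        ? engine().runKernel("Concat16", tensors, { axis: axis ?? -1 })
        : concat(tensors, axis);
    }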