@genai-fi/nanogpt 0.10.1 → 0.10.2
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/Generator.js +14 -14
- package/dist/{RealDiv-DgA3z9oO.js → RealDiv-zz7FpkKX.js} +17 -17
- package/dist/{Reshape-CF6odzV4.js → Reshape-CDVLyVfz.js} +3 -3
- package/dist/{Reshape-_kILl6tK.js → Reshape-CHdUjC72.js} +4 -4
- package/dist/TeachableLLM.js +8 -8
- package/dist/{axis_util-BvHEw88j.js → axis_util-BsIr9ZNu.js} +1 -1
- package/dist/backend.js +2 -2
- package/dist/{backend_util-D-rUb2ty.js → backend_util-B1XRLuq9.js} +31 -31
- package/dist/{backend_webgpu-B0u2ndUn.js → backend_webgpu-CqpfEImu.js} +5 -5
- package/dist/{broadcast_to-CwF7XIeu.js → broadcast_to-B0ChcDaz.js} +4 -4
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +5 -5
- package/dist/checks/normRMS.js +4 -4
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +3 -3
- package/dist/checks/rope.js +2 -2
- package/dist/{complex-CSlYz-2T.js → complex-BBiRlsVq.js} +3 -3
- package/dist/{concat-BHlIJeyT.js → concat-DmBLPVGC.js} +3 -3
- package/dist/{concat_util-DcJk7YHS.js → concat_util-iBYIyuQe.js} +1 -1
- package/dist/{dataset-0xP8GjwI.js → dataset-D2P7rHAw.js} +5 -5
- package/dist/{dropout-C1pM3f11.js → dropout-B1x1kYMa.js} +3 -3
- package/dist/{expand_dims-BPG4fwBP.js → expand_dims-ouvfxQ1n.js} +3 -3
- package/dist/{exports_initializers-xuidcwI4.js → exports_initializers-CZSUJoVE.js} +1 -1
- package/dist/{gather-DykLGqmW.js → gather-CH9sdacz.js} +2 -2
- package/dist/{gelu-CNLFZWea.js → gelu-Bmhopi0J.js} +2 -2
- package/dist/{gpgpu_math-DDVJCn6-.js → gpgpu_math-DsCcikas.js} +3 -3
- package/dist/{index-ZyQhjEPo.js → index-D6Q1lPZO.js} +55 -55
- package/dist/{index-CjOj7j-u.js → index-DRyE072i.js} +15 -15
- package/dist/{kernel_funcs_utils-Dg_-E44D.js → kernel_funcs_utils-CWfOAPGO.js} +9 -9
- package/dist/layers/BaseLayer.js +10 -10
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +5 -5
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +4 -4
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +8 -8
- package/dist/{log_sum_exp-DWI-76TI.js → log_sum_exp-D3ftBNY5.js} +6 -6
- package/dist/main.js +8 -8
- package/dist/{matMul16--R5hOwDG.js → matMul16-fEAJ4smh.js} +4 -4
- package/dist/{mat_mul-DeAh4uTH.js → mat_mul-C59XWcJd.js} +2 -2
- package/dist/{mod-Gt1rMB4n.js → mod-DESSvHIU.js} +2 -2
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +8 -8
- package/dist/{mulmat_packed_gpu-BMFhLwta.js → mulmat_packed_gpu-Coh6qbJk.js} +1 -1
- package/dist/{ones-CAMiP4I2.js → ones-jU9jlQvM.js} +4 -4
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +2 -2
- package/dist/ops/cpu/adamMoments.js +3 -3
- package/dist/ops/cpu/appendCache.js +3 -3
- package/dist/ops/cpu/attentionMask.js +6 -6
- package/dist/ops/cpu/fusedSoftmax.js +3 -3
- package/dist/ops/cpu/gatherSub.js +4 -4
- package/dist/ops/cpu/gelu.js +2 -2
- package/dist/ops/cpu/matMul16.js +3 -3
- package/dist/ops/cpu/matMulGelu.js +4 -4
- package/dist/ops/cpu/matMulMul.js +2 -2
- package/dist/ops/cpu/mulDropout.js +2 -2
- package/dist/ops/cpu/normRMS.js +2 -2
- package/dist/ops/cpu/qkv.js +4 -4
- package/dist/ops/cpu/rope.js +6 -6
- package/dist/ops/cpu/scatterSub.js +7 -7
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +2 -2
- package/dist/ops/grads/attentionMask.js +3 -3
- package/dist/ops/grads/gelu.js +3 -3
- package/dist/ops/grads/matMul16.js +4 -4
- package/dist/ops/grads/matMulGelu.js +2 -2
- package/dist/ops/grads/normRMS.js +2 -2
- package/dist/ops/grads/pack16.js +4 -4
- package/dist/ops/grads/qkv.js +4 -4
- package/dist/ops/grads/rope.js +3 -3
- package/dist/ops/grads/softmax16.js +2 -2
- package/dist/ops/grads/unpack16.js +3 -3
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +1 -1
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +2 -2
- package/dist/ops/qkv.js +1 -1
- package/dist/ops/reshape16.js +3 -3
- package/dist/ops/rope.js +5 -5
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +1 -1
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +4 -4
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +3 -3
- package/dist/ops/webgl/adamMoments.js +2 -2
- package/dist/ops/webgl/appendCache.js +2 -2
- package/dist/ops/webgl/attentionMask.js +2 -2
- package/dist/ops/webgl/fusedSoftmax.js +6 -6
- package/dist/ops/webgl/gatherSub.js +2 -2
- package/dist/ops/webgl/gelu.js +3 -3
- package/dist/ops/webgl/log.js +4 -4
- package/dist/ops/webgl/matMul16.js +5 -5
- package/dist/ops/webgl/matMulGelu.js +6 -6
- package/dist/ops/webgl/matMulMul.js +2 -2
- package/dist/ops/webgl/mulDropout.js +2 -2
- package/dist/ops/webgl/normRMS.js +3 -3
- package/dist/ops/webgl/qkv.js +2 -2
- package/dist/ops/webgl/rope.js +2 -2
- package/dist/ops/webgl/scatterSub.js +2 -2
- package/dist/ops/webgpu/adamAdjust.js +5 -5
- package/dist/ops/webgpu/adamMoments.js +5 -5
- package/dist/ops/webgpu/add16.js +2 -2
- package/dist/ops/webgpu/appendCache.js +5 -5
- package/dist/ops/webgpu/attentionMask.js +4 -4
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +7 -7
- package/dist/ops/webgpu/gatherSub.js +5 -5
- package/dist/ops/webgpu/gelu.js +4 -4
- package/dist/ops/webgpu/matMul16.js +6 -6
- package/dist/ops/webgpu/matMul16_program.js +3 -3
- package/dist/ops/webgpu/mul16.js +2 -2
- package/dist/ops/webgpu/normRMS.js +4 -4
- package/dist/ops/webgpu/normRMSGrad.js +6 -6
- package/dist/ops/webgpu/pack16.js +2 -2
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +4 -4
- package/dist/ops/webgpu/rope.js +5 -5
- package/dist/ops/webgpu/scatterSub.js +5 -5
- package/dist/ops/webgpu/slice16.js +6 -6
- package/dist/ops/webgpu/softmax16.js +4 -4
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +2 -2
- package/dist/ops/webgpu/sub16.js +2 -2
- package/dist/ops/webgpu/sum16.js +5 -5
- package/dist/ops/webgpu/transpose16.js +3 -3
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +4 -4
- package/dist/ops/webgpu/unpack16.js +4 -4
- package/dist/ops/webgpu/utils/binary_op.js +4 -4
- package/dist/ops/webgpu/utils/reductions.js +5 -5
- package/dist/{ops-CNI3TwqM.js → ops-BFDtP6th.js} +24 -24
- package/dist/{pack16-CFUqumar.js → pack16-CmVZs6af.js} +3 -3
- package/dist/patches/PackedTensor.js +1 -1
- package/dist/patches/engine.js +7 -5
- package/dist/patches/tape.js +1 -1
- package/dist/patches/webgpu_backend.js +5 -5
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +3 -3
- package/dist/{random_width-DY6Kk2Dl.js → random_width-BVV9HveY.js} +31 -31
- package/dist/{range-BMS52eQi.js → range-ZZZD60Fx.js} +2 -2
- package/dist/{reciprocal-CTmshQ9J.js → reciprocal-CrYlsAGD.js} +2 -2
- package/dist/{register_all_kernels-Bwu1PTuU.js → register_all_kernels-nvj2k7OC.js} +41 -41
- package/dist/{relu-yZ2-7WxU.js → relu-BYDneVPn.js} +2 -2
- package/dist/{reshape-DevtBWtf.js → reshape-CaPQzFvz.js} +2 -2
- package/dist/{rope-B5UUMsPi.js → rope-s4W2XO9B.js} +5 -5
- package/dist/{scatter_nd_util-5EL-8VAQ.js → scatter_nd_util-C7zXRT_h.js} +1 -1
- package/dist/{selu_util-D1w6yyTO.js → selu_util-BGPXmd4B.js} +16 -16
- package/dist/{shared-BRksrJb3.js → shared-CHhxz-O5.js} +1 -1
- package/dist/{shared-BuAXb4CI.js → shared-D2NP_CpY.js} +8 -8
- package/dist/{sin-BGfy2HZo.js → sin-Djs4aQiu.js} +2 -2
- package/dist/{slice-D_gkkqZK.js → slice-DvovR5wq.js} +2 -2
- package/dist/{slice_util-DtEldBfK.js → slice_util-DyjSAD0u.js} +1 -1
- package/dist/{softmax-ZHVebtR1.js → softmax-C9JQEtnO.js} +2 -2
- package/dist/{split-DrfihRpZ.js → split-DBck65sX.js} +2 -2
- package/dist/{squeeze-DZEpeblb.js → squeeze-C00Ipm_7.js} +3 -3
- package/dist/{stack-yOIAalTq.js → stack-ChnHwRpX.js} +3 -3
- package/dist/{sum-_fzj5ZTB.js → sum-ywRJj3Zr.js} +2 -2
- package/dist/{tensor-f35l8Odg.js → tensor-0r5yOo2R.js} +1 -1
- package/dist/{tensor-DdQUJZlz.js → tensor-CzmOBsdf.js} +21 -21
- package/dist/{tensor1d-CeZuc-Rv.js → tensor1d-BlUT89BP.js} +2 -2
- package/dist/{tensor2d-G4Ys2GxX.js → tensor2d-CSB4KOb0.js} +2 -2
- package/dist/{tensor4d-B8roDgtc.js → tensor4d-D7bLqGqz.js} +2 -2
- package/dist/{tensor_util-DV-FP5Q3.js → tensor_util-DfwaWayG.js} +12 -12
- package/dist/{tfjs_backend-kNyO5L2d.js → tfjs_backend-CNkSTL0c.js} +38 -38
- package/dist/{tile-BzyEiF-F.js → tile-CR074jmp.js} +3 -3
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.js +2 -2
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/{transpose-DKELTqhe.js → transpose-DH4gmHvu.js} +4 -4
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.js +338 -304
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-Bhn5bHYv.js → variable-DzfrwYuP.js} +1 -1
- package/dist/{webgpu_program-Cigz-7RF.js → webgpu_program-DzaQiqel.js} +2 -2
- package/dist/{webgpu_util-BBCnKm2X.js → webgpu_util-0_ubCEHJ.js} +2 -2
- package/dist/{zeros-2gldETuK.js → zeros-DBFVbpv5.js} +3 -3
- package/package.json +1 -1
@@ -1,22 +1,22 @@
 import "../utilities/packed.js";
-import { H as y } from "../index-ZyQhjEPo.js";
+import { H as y } from "../index-D6Q1lPZO.js";
 import "../ops/cpu/attentionMask.js";
 import "../ops/webgl/attentionMask.js";
 import "../ops/grads/attentionMask.js";
-import "../random_width-DY6Kk2Dl.js";
-import "../register_all_kernels-Bwu1PTuU.js";
+import "../random_width-BVV9HveY.js";
+import "../register_all_kernels-nvj2k7OC.js";
 import "../index-Cp39cXWe.js";
-import "../dataset-0xP8GjwI.js";
+import "../dataset-D2P7rHAw.js";
 import "../ops/cpu/rope.js";
 import "../ops/webgl/rope.js";
-import "../rope-B5UUMsPi.js";
+import "../rope-s4W2XO9B.js";
 import "../ops/cpu/appendCache.js";
 import "../ops/webgl/appendCache.js";
 import "../ops/grads/softmax16.js";
-import "../matMul16--R5hOwDG.js";
+import "../matMul16-fEAJ4smh.js";
 import "../ops/webgl/matMul16.js";
 import "../ops/cpu/matMul16.js";
-import "../pack16-CFUqumar.js";
+import "../pack16-CmVZs6af.js";
 import "../ops/transpose16.js";
 import "../ops/reshape16.js";
 import "../ops/cpu/qkv.js";
@@ -47,7 +47,7 @@ import "../ops/webgl/matMulGelu.js";
 import "../ops/grads/matMulGelu.js";
 import "../ops/cpu/gelu.js";
 import "../ops/webgl/gelu.js";
-import "../gelu-CNLFZWea.js";
+import "../gelu-Bmhopi0J.js";
 import "../ops/webgl/log.js";
 import "../checks/normRMS.js";
 import "../checks/normRMSGrad.js";
@@ -1,9 +1,9 @@
-import { A as e, B as x, E as p, c as E, n as $ } from "./index-ZyQhjEPo.js";
-import { e as d } from "./axis_util-BvHEw88j.js";
-import { y as h, z as S, L as K } from "./tensor_util-DV-FP5Q3.js";
-import { r as m } from "./reshape-DevtBWtf.js";
-import { s as _ } from "./sum-_fzj5ZTB.js";
-import { p as T } from "./tensor-DdQUJZlz.js";
+import { A as e, B as x, E as p, c as E, n as $ } from "./index-D6Q1lPZO.js";
+import { e as d } from "./axis_util-BsIr9ZNu.js";
+import { y as h, z as S, L as K } from "./tensor_util-DfwaWayG.js";
+import { r as m } from "./reshape-CaPQzFvz.js";
+import { s as _ } from "./sum-ywRJj3Zr.js";
+import { p as T } from "./tensor-CzmOBsdf.js";
 function b(s, o = null, n = !1) {
   const t = { x: x(s, "x", "max") }, r = { reductionIndices: o, keepDims: n };
   return p.runKernel(h, t, r);
package/dist/main.js CHANGED
@@ -9,7 +9,7 @@ import { default as uo } from "./Generator.js";
 import { default as To } from "./models/model.js";
 import { estimateMemoryUsage as go, estimateParameterCount as Mo, estimateResources as Po, estimateTrainingMemoryUsage as Co, validateConfig as Eo } from "./utilities/parameters.js";
 import { default as Bo } from "./utilities/topP.js";
-import "./index-ZyQhjEPo.js";
+import "./index-D6Q1lPZO.js";
 import "./ops/cpu/scatterSub.js";
 import "./ops/webgl/scatterSub.js";
 import "./ops/cpu/gatherSub.js";
@@ -20,13 +20,13 @@ import "./ops/grads/attentionMask.js";
 import "./ops/cpu/qkv.js";
 import "./ops/webgl/qkv.js";
 import "./ops/grads/qkv.js";
-import "./random_width-DY6Kk2Dl.js";
-import "./register_all_kernels-Bwu1PTuU.js";
+import "./random_width-BVV9HveY.js";
+import "./register_all_kernels-nvj2k7OC.js";
 import "./index-Cp39cXWe.js";
-import "./dataset-0xP8GjwI.js";
+import "./dataset-D2P7rHAw.js";
 import "./ops/cpu/rope.js";
 import "./ops/webgl/rope.js";
-import "./rope-B5UUMsPi.js";
+import "./rope-s4W2XO9B.js";
 import "./ops/cpu/appendCache.js";
 import "./ops/webgl/appendCache.js";
 import "./ops/cpu/matMulGelu.js";
@@ -34,7 +34,7 @@ import "./ops/webgl/matMulGelu.js";
 import "./ops/grads/matMulGelu.js";
 import "./ops/cpu/gelu.js";
 import "./ops/webgl/gelu.js";
-import "./gelu-CNLFZWea.js";
+import "./gelu-Bmhopi0J.js";
 import "./ops/cpu/normRMS.js";
 import "./ops/webgl/normRMS.js";
 import "./ops/grads/normRMS.js";
@@ -43,9 +43,9 @@ import "./ops/cpu/adamMoments.js";
 import "./ops/webgl/adamMoments.js";
 import "./ops/cpu/adamAdjust.js";
 import "./ops/webgl/adamAdjust.js";
-import { u as o, p as r } from "./pack16-CFUqumar.js";
+import { u as o, p as r } from "./pack16-CmVZs6af.js";
 import "./ops/grads/softmax16.js";
-import "./matMul16--R5hOwDG.js";
+import "./matMul16-fEAJ4smh.js";
 import "./ops/webgl/matMul16.js";
 import "./ops/cpu/matMul16.js";
 import "./ops/transpose16.js";
@@ -1,12 +1,12 @@
-import { e as y } from "./index-ZyQhjEPo.js";
+import { e as y } from "./index-D6Q1lPZO.js";
 import "./ops/webgl/matMul16.js";
 import "./ops/cpu/matMul16.js";
 import { isPackedTensor as g, packTensor as k } from "./utilities/packed.js";
-import { p as v } from "./pack16-CFUqumar.js";
-import { d as h } from "./gelu-CNLFZWea.js";
+import { p as v } from "./pack16-CmVZs6af.js";
+import { d as h } from "./gelu-Bmhopi0J.js";
 import { transpose16 as S } from "./ops/transpose16.js";
 import { reshape16 as w } from "./ops/reshape16.js";
-import { a as G } from "./tensor_util-DV-FP5Q3.js";
+import { a as G } from "./tensor_util-DfwaWayG.js";
 const T = {
   kernelName: "MatMul16",
   inputsToSave: ["A", "B"],
@@ -1,5 +1,5 @@
-import { A as u, B as s, E as c } from "./index-ZyQhjEPo.js";
-import { m as M, B as p } from "./tensor_util-DV-FP5Q3.js";
+import { A as u, B as s, E as c } from "./index-D6Q1lPZO.js";
+import { m as M, B as p } from "./tensor_util-DfwaWayG.js";
 function f(o, e, r = !1, m = !1) {
   let t = s(o, "a", "matMul"), a = s(e, "b", "matMul");
   [t, a] = M(t, a);
@@ -1,5 +1,5 @@
-import { A as e, B as a, E as n } from "./index-ZyQhjEPo.js";
-import { m as p, M as c } from "./tensor_util-DV-FP5Q3.js";
+import { A as e, B as a, E as n } from "./index-D6Q1lPZO.js";
+import { m as p, M as c } from "./tensor_util-DfwaWayG.js";
 function d(m, r) {
   let o = a(m, "a", "mod"), t = a(r, "b", "mod");
   [o, t] = p(o, t);
package/dist/models/NanoGPTV1.js CHANGED
@@ -3,11 +3,11 @@ import b from "../layers/TransformerBlock.js";
 import k from "../layers/TiedEmbedding.js";
 import w from "../layers/RoPECache.js";
 import E from "../layers/RMSNorm.js";
-import { t as l, k as u } from "../index-ZyQhjEPo.js";
+import { t as l, k as u } from "../index-D6Q1lPZO.js";
 import C from "./model.js";
 import P from "../layers/PositionEmbedding.js";
 import { packingSupported as _ } from "../utilities/packed.js";
-import { p as y, u as M } from "../pack16-CFUqumar.js";
+import { p as y, u as M } from "../pack16-CmVZs6af.js";
 class I extends C {
   wte;
   // Token embeddings
package/dist/models/model.js CHANGED
@@ -1,23 +1,23 @@
 import m from "../layers/BaseLayer.js";
 import "../utilities/packed.js";
-import "../index-ZyQhjEPo.js";
+import "../index-D6Q1lPZO.js";
 import "../ops/cpu/attentionMask.js";
 import "../ops/webgl/attentionMask.js";
 import "../ops/grads/attentionMask.js";
-import "../random_width-DY6Kk2Dl.js";
-import "../register_all_kernels-Bwu1PTuU.js";
+import "../random_width-BVV9HveY.js";
+import "../register_all_kernels-nvj2k7OC.js";
 import "../index-Cp39cXWe.js";
-import "../dataset-0xP8GjwI.js";
+import "../dataset-D2P7rHAw.js";
 import "../ops/cpu/rope.js";
 import "../ops/webgl/rope.js";
-import "../rope-B5UUMsPi.js";
+import "../rope-s4W2XO9B.js";
 import "../ops/cpu/appendCache.js";
 import "../ops/webgl/appendCache.js";
 import "../ops/grads/softmax16.js";
-import "../matMul16--R5hOwDG.js";
+import "../matMul16-fEAJ4smh.js";
 import "../ops/webgl/matMul16.js";
 import "../ops/cpu/matMul16.js";
-import "../pack16-CFUqumar.js";
+import "../pack16-CmVZs6af.js";
 import "../ops/transpose16.js";
 import "../ops/reshape16.js";
 import "../ops/cpu/qkv.js";
@@ -45,7 +45,7 @@ import "../ops/webgl/matMulGelu.js";
 import "../ops/grads/matMulGelu.js";
 import "../ops/cpu/gelu.js";
 import "../ops/webgl/gelu.js";
-import "../gelu-CNLFZWea.js";
+import "../gelu-Bmhopi0J.js";
 import "../ops/webgl/log.js";
 import "../checks/normRMS.js";
 import "../checks/normRMSGrad.js";
@@ -1,4 +1,4 @@
-import { u as z } from "./gpgpu_math-DDVJCn6-.js";
+import { u as z } from "./gpgpu_math-DsCcikas.js";
 class g {
   constructor(e, s, v, a = !1, r = !1, c = !1, t = null, o = !1, l = !1) {
     this.variableNames = ["matrixA", "matrixB"], this.packedInputs = !0, this.packedOutput = !0, this.outputShape = v, this.enableShapeUniforms = z(this.outputShape.length);
@@ -1,7 +1,7 @@
-import { E as a } from "./index-ZyQhjEPo.js";
-import { d as t, m as n, s as i } from "./tensor-DdQUJZlz.js";
-import { c as f } from "./complex-CSlYz-2T.js";
-import { z as c } from "./zeros-2gldETuK.js";
+import { E as a } from "./index-D6Q1lPZO.js";
+import { d as t, m as n, s as i } from "./tensor-CzmOBsdf.js";
+import { c as f } from "./complex-BBiRlsVq.js";
+import { z as c } from "./zeros-DBFVbpv5.js";
 function l(o, r = "float32") {
   if (t(o), r === "complex64") {
     const e = l(o, "float32"), m = c(o, "float32");
package/dist/ops/adamAdjust.js CHANGED
package/dist/ops/adamMoments.js CHANGED
package/dist/ops/add16.js CHANGED
package/dist/ops/appendCache.js CHANGED
@@ -1,9 +1,9 @@
-import { e as a } from "../index-ZyQhjEPo.js";
+import { e as a } from "../index-D6Q1lPZO.js";
 import "./cpu/appendCache.js";
 import "./webgl/appendCache.js";
 import { isPackedTensor as c } from "../utilities/packed.js";
-import { c as t } from "../concat-BHlIJeyT.js";
-import { z as f } from "../zeros-2gldETuK.js";
+import { c as t } from "../concat-DmBLPVGC.js";
+import { z as f } from "../zeros-DBFVbpv5.js";
 function C(r, o, n, p) {
   if (!p) {
     const e = r.shape[2], s = c(r);
package/dist/ops/concat16.js CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as o } from "../utilities/packed.js";
-import { e } from "../index-ZyQhjEPo.js";
-import { c } from "../concat-BHlIJeyT.js";
+import { e } from "../index-D6Q1lPZO.js";
+import { c } from "../concat-DmBLPVGC.js";
 function p(r, n) {
   return o(r[0]) ? e().runKernel("Concat16", r, { axis: n ?? -1 }) : c(r, n);
 }
@@ -1,5 +1,5 @@
-import { l as t, n as r, m as k, o as z } from "../../index-ZyQhjEPo.js";
-import { r as A } from "../../tensor_util-DV-FP5Q3.js";
+import { l as t, n as r, m as k, o as z } from "../../index-D6Q1lPZO.js";
+import { r as A } from "../../tensor_util-DfwaWayG.js";
 function C(o) {
   const { moments: n, value: i } = o.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = o.attrs, e = n.shape.length, c = new Array(e).fill(0), s = n.shape.slice();
   s[e - 1] = 1;
@@ -1,6 +1,6 @@
-import "../../index-ZyQhjEPo.js";
-import { r as p } from "../../tensor_util-DV-FP5Q3.js";
-import { s as b } from "../../stack-yOIAalTq.js";
+import "../../index-D6Q1lPZO.js";
+import { r as p } from "../../tensor_util-DfwaWayG.js";
+import { s as b } from "../../stack-ChnHwRpX.js";
 function f(t) {
   const { moments: n, gradient: o } = t.inputs, { beta1: c, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
   s[e - 1] = 1;
@@ -1,6 +1,6 @@
-import "../../index-ZyQhjEPo.js";
-import { r as d } from "../../tensor_util-DV-FP5Q3.js";
-import { c as h } from "../../concat-BHlIJeyT.js";
+import "../../index-D6Q1lPZO.js";
+import { r as d } from "../../tensor_util-DfwaWayG.js";
+import { c as h } from "../../concat-DmBLPVGC.js";
 function u(p) {
   const { cache: n, item: s } = p.inputs, { maxSize: a, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], r = n.shape[3], e = s.shape[2];
   if (c + e <= a) {
@@ -1,9 +1,9 @@
-import { i as d, b as u } from "../../index-ZyQhjEPo.js";
-import { r as o } from "../../tensor_util-DV-FP5Q3.js";
-import { l as N, w as b } from "../../ops-CNI3TwqM.js";
-import { o as A } from "../../ones-CAMiP4I2.js";
-import { z as I } from "../../zeros-2gldETuK.js";
-import { m as g } from "../../mat_mul-DeAh4uTH.js";
+import { i as d, b as u } from "../../index-D6Q1lPZO.js";
+import { r as o } from "../../tensor_util-DfwaWayG.js";
+import { l as N, w as b } from "../../ops-BFDtP6th.js";
+import { o as A } from "../../ones-jU9jlQvM.js";
+import { z as I } from "../../zeros-DBFVbpv5.js";
+import { m as g } from "../../mat_mul-C59XWcJd.js";
 function a(n) {
   const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(A([t, t]), -1, 0).cast("bool"), i = I([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = b(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
   return k.add(p);
@@ -1,6 +1,6 @@
-import "../../index-ZyQhjEPo.js";
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
-import { s as m } from "../../softmax-ZHVebtR1.js";
+import "../../index-D6Q1lPZO.js";
+import { r as e } from "../../tensor_util-DfwaWayG.js";
+import { s as m } from "../../softmax-C9JQEtnO.js";
 function o(t) {
   const { inputs: s, attrs: a } = t, { logits: n } = s, { dim: i, dropoutRate: r } = a;
   if (!n)
@@ -1,7 +1,7 @@
-import { A as u, B as c, E as m, c as g } from "../../index-ZyQhjEPo.js";
-import { k as p, r as h } from "../../tensor_util-DV-FP5Q3.js";
-import { r as f } from "../../range-BMS52eQi.js";
-import { s as l } from "../../stack-yOIAalTq.js";
+import { A as u, B as c, E as m, c as g } from "../../index-D6Q1lPZO.js";
+import { k as p, r as h } from "../../tensor_util-DfwaWayG.js";
+import { r as f } from "../../range-ZZZD60Fx.js";
+import { s as l } from "../../stack-ChnHwRpX.js";
 function N(e, t) {
   const n = c(t, "indices", "gatherND", "int32"), r = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
   return m.runKernel(p, r);
package/dist/ops/cpu/gelu.js CHANGED
@@ -1,5 +1,5 @@
-import { t as d } from "../../index-ZyQhjEPo.js";
-import { r } from "../../tensor_util-DV-FP5Q3.js";
+import { t as d } from "../../index-D6Q1lPZO.js";
+import { r } from "../../tensor_util-DfwaWayG.js";
 const o = 0.7978845608028654, c = 0.044715;
 function m(t) {
   const { inputs: u } = t, { x: n } = u, e = n;
package/dist/ops/cpu/matMul16.js CHANGED
@@ -1,7 +1,7 @@
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import "../../index-ZyQhjEPo.js";
-import { r as p } from "../../tensor_util-DV-FP5Q3.js";
-import { m } from "../../mat_mul-DeAh4uTH.js";
+import "../../index-D6Q1lPZO.js";
+import { r as p } from "../../tensor_util-DfwaWayG.js";
+import { m } from "../../mat_mul-C59XWcJd.js";
 function l(r) {
   const { A: e, B: n } = r.inputs, { transposeA: o, transposeB: s } = r.attrs, a = !t(e), c = !t(n);
   if (a && c)
@@ -1,7 +1,7 @@
-import { t as m } from "../../index-ZyQhjEPo.js";
-import { g as i, d as M } from "../../gelu-CNLFZWea.js";
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
-import { m as k } from "../../mat_mul-DeAh4uTH.js";
+import { t as m } from "../../index-D6Q1lPZO.js";
+import { g as i, d as M } from "../../gelu-Bmhopi0J.js";
+import { r as e } from "../../tensor_util-DfwaWayG.js";
+import { m as k } from "../../mat_mul-C59XWcJd.js";
 function c(t) {
   const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
   return m(() => {
@@ -1,5 +1,5 @@
-import { t as M } from "../../index-ZyQhjEPo.js";
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+import { t as M } from "../../index-D6Q1lPZO.js";
+import { r as e } from "../../tensor_util-DfwaWayG.js";
 function n(t) {
   const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, i = a, k = c;
   return M(() => m.matMul(i, o, s).mul(k));
@@ -1,5 +1,5 @@
-import { m as u } from "../../index-ZyQhjEPo.js";
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+import { m as u } from "../../index-D6Q1lPZO.js";
+import { r as e } from "../../tensor_util-DfwaWayG.js";
 function n(o) {
   const { inputs: r } = o, { a: l, b: t } = r;
   return console.warn("Using fallback mulDrop implementation without dropout."), u(l, t);
package/dist/ops/cpu/normRMS.js CHANGED
@@ -1,5 +1,5 @@
-import { t as d } from "../../index-ZyQhjEPo.js";
-import { r as a } from "../../tensor_util-DV-FP5Q3.js";
+import { t as d } from "../../index-D6Q1lPZO.js";
+import { r as a } from "../../tensor_util-DfwaWayG.js";
 function i(t) {
   const { inputs: e } = t, { x: n, gamma: s } = e, r = n, m = s;
   return d(() => {
package/dist/ops/cpu/qkv.js CHANGED
@@ -1,7 +1,7 @@
-import "../../index-ZyQhjEPo.js";
-import { r as q } from "../../tensor_util-DV-FP5Q3.js";
-import { r as o } from "../../reshape-DevtBWtf.js";
-import { s as x } from "../../split-DrfihRpZ.js";
+import "../../index-D6Q1lPZO.js";
+import { r as q } from "../../tensor_util-DfwaWayG.js";
+import { r as o } from "../../reshape-CaPQzFvz.js";
+import { s as x } from "../../split-DBck65sX.js";
 function v(p) {
   const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
   if (C)
package/dist/ops/cpu/rope.js CHANGED
@@ -1,9 +1,9 @@
-import "../../index-ZyQhjEPo.js";
-import { r as I } from "../../tensor_util-DV-FP5Q3.js";
-import { r as y } from "../../range-BMS52eQi.js";
-import { g as F } from "../../gather-DykLGqmW.js";
-import { s as E } from "../../stack-yOIAalTq.js";
-import { c as T } from "../../concat-BHlIJeyT.js";
+import "../../index-D6Q1lPZO.js";
+import { r as I } from "../../tensor_util-DfwaWayG.js";
+import { r as y } from "../../range-ZZZD60Fx.js";
+import { g as F } from "../../gather-CH9sdacz.js";
+import { s as E } from "../../stack-ChnHwRpX.js";
+import { c as T } from "../../concat-DmBLPVGC.js";
 function U(c, r, p, e, n) {
   const t = e.shape[3], s = p;
   if (s > t) return e;
@@ -1,10 +1,10 @@
-import { A as f, B as c, E as g, c as l, m as N } from "../../index-ZyQhjEPo.js";
-import { j as b, r as S } from "../../tensor_util-DV-FP5Q3.js";
-import { d as h } from "../../tensor-DdQUJZlz.js";
-import { v as D } from "../../scatter_nd_util-5EL-8VAQ.js";
-import { r as k } from "../../range-BMS52eQi.js";
-import { s as v } from "../../stack-yOIAalTq.js";
-import { o as E } from "../../ones-CAMiP4I2.js";
+import { A as f, B as c, E as g, c as l, m as N } from "../../index-D6Q1lPZO.js";
+import { j as b, r as S } from "../../tensor_util-DfwaWayG.js";
+import { d as h } from "../../tensor-CzmOBsdf.js";
+import { v as D } from "../../scatter_nd_util-C7zXRT_h.js";
+import { r as k } from "../../range-ZZZD60Fx.js";
+import { s as v } from "../../stack-ChnHwRpX.js";
+import { o as E } from "../../ones-jU9jlQvM.js";
 function I(r, e, s) {
   h(s);
   const n = c(r, "indices", "scatterND", "int32"), t = c(e, "updates", "scatterND");
package/dist/ops/dot16.js CHANGED
@@ -1,8 +1,8 @@
-import { b as d } from "../matMul16--R5hOwDG.js";
+import { b as d } from "../matMul16-fEAJ4smh.js";
 import { transpose16 as w } from "./transpose16.js";
 import { reshape16 as n } from "./reshape16.js";
 import { isPackedTensor as p } from "../utilities/packed.js";
-import { d as x } from "../tfjs_backend-kNyO5L2d.js";
+import { d as x } from "../tfjs_backend-CNkSTL0c.js";
 function E(e, s, h = !1, c = !1) {
   if (!p(e) && !p(s))
     return x(e, s);
package/dist/ops/gatherSub.js CHANGED
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
-import "../index-ZyQhjEPo.js";
+import "../index-D6Q1lPZO.js";
 import "./cpu/gelu.js";
 import "./webgl/gelu.js";
-import { d as e, g as i } from "../gelu-CNLFZWea.js";
+import { d as e, g as i } from "../gelu-Bmhopi0J.js";
 export {
   e as dGelu,
   i as gelu
package/dist/ops/grads/add16.js CHANGED
@@ -1,7 +1,7 @@
-import { j as u, q as d } from "../../index-ZyQhjEPo.js";
+import { j as u, q as d } from "../../index-D6Q1lPZO.js";
 import { sum16 as p } from "../sum16.js";
 import { reshape16 as c } from "../reshape16.js";
-import { a as h } from "../../tensor_util-DV-FP5Q3.js";
+import { a as h } from "../../tensor_util-DfwaWayG.js";
 const m = {
   kernelName: "Add16",
   inputsToSave: ["a", "b"],
@@ -1,7 +1,7 @@
-import "../../index-ZyQhjEPo.js";
-import { m as o } from "../../matMul16--R5hOwDG.js";
+import "../../index-D6Q1lPZO.js";
+import { m as o } from "../../matMul16-fEAJ4smh.js";
 import { transpose16 as m } from "../transpose16.js";
-import { a as c } from "../../tensor_util-DV-FP5Q3.js";
+import { a as c } from "../../tensor_util-DfwaWayG.js";
 const l = {
   kernelName: "AttentionMask",
   inputsToSave: ["q", "k"],
package/dist/ops/grads/gelu.js CHANGED
@@ -1,6 +1,6 @@
-import "../../index-ZyQhjEPo.js";
-import { a as m } from "../../gelu-CNLFZWea.js";
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../index-D6Q1lPZO.js";
+import { a as m } from "../../gelu-Bmhopi0J.js";
+import "../../tensor_util-DfwaWayG.js";
 export {
   m as geluGradConfig
 };
@@ -1,9 +1,9 @@
-import "../../index-ZyQhjEPo.js";
-import { a as f } from "../../matMul16--R5hOwDG.js";
-import "../../gelu-CNLFZWea.js";
+import "../../index-D6Q1lPZO.js";
+import { a as f } from "../../matMul16-fEAJ4smh.js";
+import "../../gelu-Bmhopi0J.js";
 import "../transpose16.js";
 import "../reshape16.js";
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../tensor_util-DfwaWayG.js";
 export {
   f as matMul16GradConfig
 };
@@ -1,5 +1,5 @@
-import { e as l } from "../../index-ZyQhjEPo.js";
-import { a as o } from "../../tensor_util-DV-FP5Q3.js";
+import { e as l } from "../../index-D6Q1lPZO.js";
+import { a as o } from "../../tensor_util-DfwaWayG.js";
 function i(e, r, n) {
   return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
 }
@@ -1,5 +1,5 @@
-import { e as t } from "../../index-ZyQhjEPo.js";
-import { a as g } from "../../tensor_util-DV-FP5Q3.js";
+import { e as t } from "../../index-D6Q1lPZO.js";
+import { a as g } from "../../tensor_util-DfwaWayG.js";
 function i(r, a, m) {
   return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
 }
package/dist/ops/grads/pack16.js CHANGED
@@ -1,7 +1,7 @@
-import "../../index-ZyQhjEPo.js";
-import { b as t } from "../../pack16-CFUqumar.js";
-import "../../slice-D_gkkqZK.js";
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../index-D6Q1lPZO.js";
+import { b as t } from "../../pack16-CmVZs6af.js";
+import "../../slice-DvovR5wq.js";
+import "../../tensor_util-DfwaWayG.js";
 export {
   t as packGradConfig
 };
package/dist/ops/grads/qkv.js CHANGED
@@ -1,10 +1,10 @@
-import "../../index-ZyQhjEPo.js";
-import { a as u } from "../../matMul16--R5hOwDG.js";
+import "../../index-D6Q1lPZO.js";
+import { a as u } from "../../matMul16-fEAJ4smh.js";
 import { concat16 as f } from "../concat16.js";
 import { sum16 as g } from "../sum16.js";
 import { packTensor as k, isPackedTensor as l } from "../../utilities/packed.js";
-import { a as h } from "../../tensor_util-DV-FP5Q3.js";
-import { s as G } from "../../squeeze-DZEpeblb.js";
+import { a as h } from "../../tensor_util-DfwaWayG.js";
+import { s as G } from "../../squeeze-C00Ipm_7.js";
 const m = {
   kernelName: "QKV",
   inputsToSave: ["x", "kernel"],
package/dist/ops/grads/rope.js CHANGED
@@ -1,7 +1,7 @@
 import "../../utilities/packed.js";
-import "../../index-ZyQhjEPo.js";
-import { a as t } from "../../rope-B5UUMsPi.js";
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../index-D6Q1lPZO.js";
+import { a as t } from "../../rope-s4W2XO9B.js";
+import "../../tensor_util-DfwaWayG.js";
 export {
   t as ropeGradConfig
 };
@@ -1,6 +1,6 @@
-import { e as n } from "../../index-ZyQhjEPo.js";
+import { e as n } from "../../index-D6Q1lPZO.js";
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { a } from "../../tensor_util-DV-FP5Q3.js";
+import { a } from "../../tensor_util-DfwaWayG.js";
 function s(r, e) {
   return n().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
 }