@genai-fi/nanogpt 0.10.2 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.d.ts +10 -5
- package/dist/Generator.js +11760 -146
- package/dist/{RealDiv-zz7FpkKX.js → RealDiv-Ds-jvL09.js} +28 -30
- package/dist/Reshape-Cd6e-Otn.js +14 -0
- package/dist/{Reshape-CHdUjC72.js → Reshape-Ct266DEk.js} +21 -23
- package/dist/TeachableLLM.d.ts +4 -3
- package/dist/TeachableLLM.js +15 -16
- package/dist/Trainer.d.ts +2 -2
- package/dist/Trainer.js +6 -6
- package/dist/{axis_util-BsIr9ZNu.js → axis_util-DofAuy0p.js} +1 -1
- package/dist/backend.js +2 -2
- package/dist/{backend_util-B1XRLuq9.js → backend_util-C7NWHpv7.js} +72 -73
- package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-B0Vls736.js} +52 -54
- package/dist/broadcast_to-DDaNMbX7.js +28 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +7 -11
- package/dist/checks/normRMS.js +9 -9
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +11 -12
- package/dist/checks/rope.js +2 -2
- package/dist/clip_by_value-Dn5tzexi.js +12 -0
- package/dist/complex-DClmWqJt.js +11 -0
- package/dist/concat-C6X3AAlQ.js +17 -0
- package/dist/{concat_util-iBYIyuQe.js → concat_util-CHsJFZJJ.js} +1 -1
- package/dist/{dataset-D2P7rHAw.js → dataset-DcjWqUVQ.js} +135 -137
- package/dist/dropout-OxuaJz6z.js +92 -0
- package/dist/expand_dims-BzfJK2uc.js +11 -0
- package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-eS9QJ6ut.js} +1 -1
- package/dist/floor-DIb-lN_u.js +9 -0
- package/dist/gather-BcO5UQNJ.js +9 -0
- package/dist/{gelu-Bmhopi0J.js → gelu-DqTbCx5x.js} +10 -11
- package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-CJcbnKPC.js} +841 -1015
- package/dist/index-D0RBWjq8.js +3520 -0
- package/dist/{index-DRyE072i.js → index-Dj5TkmPY.js} +330 -331
- package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CSaumNDs.js} +132 -134
- package/dist/layers/BaseLayer.js +15 -16
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +7 -7
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +9 -9
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +21 -22
- package/dist/log_sum_exp-VLZgbFAH.js +39 -0
- package/dist/main.d.ts +1 -1
- package/dist/main.js +49 -50
- package/dist/{matMul16-fEAJ4smh.js → matMul16-cDxwemKj.js} +14 -15
- package/dist/matMulGelu-B2s_80-H.js +163 -0
- package/dist/mat_mul-DxpNTCRz.js +11 -0
- package/dist/mod-PrOKlFxH.js +11 -0
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +13 -14
- package/dist/ones-BX_wEgzB.js +14 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +12 -13
- package/dist/ops/cpu/adamMoments.js +6 -7
- package/dist/ops/cpu/appendCache.js +7 -8
- package/dist/ops/cpu/attentionMask.js +11 -11
- package/dist/ops/cpu/fusedSoftmax.js +10 -11
- package/dist/ops/cpu/gatherSub.js +10 -11
- package/dist/ops/cpu/gelu.js +14 -15
- package/dist/ops/cpu/matMul16.js +6 -7
- package/dist/ops/cpu/matMulGelu.js +5 -6
- package/dist/ops/cpu/matMulMul.js +3 -4
- package/dist/ops/cpu/mulDropout.js +3 -4
- package/dist/ops/cpu/normRMS.js +11 -12
- package/dist/ops/cpu/qkv.js +8 -9
- package/dist/ops/cpu/rope.js +9 -10
- package/dist/ops/cpu/scatterSub.js +14 -16
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +10 -11
- package/dist/ops/grads/attentionMask.js +5 -6
- package/dist/ops/grads/gelu.js +3 -4
- package/dist/ops/grads/matMul16.js +4 -5
- package/dist/ops/grads/matMulGelu.js +8 -9
- package/dist/ops/grads/normRMS.js +9 -10
- package/dist/ops/grads/pack16.js +4 -5
- package/dist/ops/grads/qkv.js +17 -19
- package/dist/ops/grads/rope.js +3 -5
- package/dist/ops/grads/softmax16.js +3 -4
- package/dist/ops/grads/unpack16.js +3 -4
- package/dist/ops/grads/utils.d.ts +1 -0
- package/dist/ops/grads/utils.js +8 -4
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +3 -4
- package/dist/ops/qkv.js +4 -8
- package/dist/ops/reshape16.js +16 -18
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +3 -8
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +5 -8
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +23 -24
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -3
- package/dist/ops/webgl/adamMoments.js +1 -2
- package/dist/ops/webgl/appendCache.js +1 -2
- package/dist/ops/webgl/attentionMask.js +5 -6
- package/dist/ops/webgl/fusedSoftmax.js +6 -8
- package/dist/ops/webgl/gatherSub.js +6 -7
- package/dist/ops/webgl/gelu.js +2 -3
- package/dist/ops/webgl/log.js +11 -12
- package/dist/ops/webgl/matMul16.js +15 -16
- package/dist/ops/webgl/matMulGelu.js +7 -111
- package/dist/ops/webgl/matMulMul.js +14 -15
- package/dist/ops/webgl/mulDropout.js +8 -9
- package/dist/ops/webgl/normRMS.js +7 -8
- package/dist/ops/webgl/qkv.js +5 -6
- package/dist/ops/webgl/rope.js +7 -8
- package/dist/ops/webgl/scatterSub.js +5 -6
- package/dist/ops/webgpu/adamAdjust.js +10 -12
- package/dist/ops/webgpu/adamMoments.js +8 -10
- package/dist/ops/webgpu/add16.js +8 -9
- package/dist/ops/webgpu/appendCache.js +23 -25
- package/dist/ops/webgpu/attentionMask.js +10 -12
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +12 -14
- package/dist/ops/webgpu/gatherSub.js +9 -11
- package/dist/ops/webgpu/gelu.js +28 -29
- package/dist/ops/webgpu/matMul16.js +26 -28
- package/dist/ops/webgpu/matMul16_program.js +4 -5
- package/dist/ops/webgpu/mul16.js +7 -8
- package/dist/ops/webgpu/normRMS.js +17 -19
- package/dist/ops/webgpu/normRMSGrad.js +21 -28
- package/dist/ops/webgpu/pack16.js +12 -13
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +13 -15
- package/dist/ops/webgpu/rope.js +25 -27
- package/dist/ops/webgpu/scatterSub.js +7 -9
- package/dist/ops/webgpu/slice16.js +21 -23
- package/dist/ops/webgpu/softmax16.js +17 -19
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +7 -8
- package/dist/ops/webgpu/sub16.js +8 -9
- package/dist/ops/webgpu/sum16.js +19 -21
- package/dist/ops/webgpu/transpose16.js +19 -20
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
- package/dist/ops/webgpu/unpack16.js +3 -4
- package/dist/ops/webgpu/utils/binary_op.js +7 -8
- package/dist/ops/webgpu/utils/reductions.js +14 -22
- package/dist/ops-FJapAPfm.js +476 -0
- package/dist/pack16-k4jq6aMX.js +39 -0
- package/dist/patches/webgpu_backend.js +19 -20
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +15 -16
- package/dist/{random_width-BVV9HveY.js → random_width-UGQn4OWb.js} +2506 -2761
- package/dist/range-CuGvVN2c.js +10 -0
- package/dist/relu-Cf80uA2p.js +9 -0
- package/dist/reshape-CkjKPPqB.js +9 -0
- package/dist/resize_nearest_neighbor-DB8k9KN_.js +175 -0
- package/dist/rope-BmZmp9uP.js +24 -0
- package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-BY22Cc-C.js} +1 -1
- package/dist/selu_util-BuLbmbrl.js +44 -0
- package/dist/{shared-CHhxz-O5.js → shared-B7USJZgw.js} +1 -1
- package/dist/{shared-D2NP_CpY.js → shared-BQboIImQ.js} +379 -381
- package/dist/slice-Aqy7KbJh.js +12 -0
- package/dist/{slice_util-DyjSAD0u.js → slice_util-D8CQRenR.js} +7 -7
- package/dist/{softmax-C9JQEtnO.js → softmax-faLoUZVT.js} +4 -5
- package/dist/split-BNz5jcGc.js +9 -0
- package/dist/squeeze--YMgaAAf.js +10 -0
- package/dist/stack-WJK22CFn.js +11 -0
- package/dist/step-dXR33iOg.js +261 -0
- package/dist/sum-BdplSvq_.js +11 -0
- package/dist/{tensor-0r5yOo2R.js → tensor-BQqrDvpx.js} +1 -1
- package/dist/tensor1d-LxP9asMm.js +11 -0
- package/dist/{tensor2d-CSB4KOb0.js → tensor2d-BN1sSfQO.js} +6 -7
- package/dist/{tensor4d-D7bLqGqz.js → tensor4d-DVwr7pLF.js} +6 -7
- package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-Vi4JfLzT.js} +256 -265
- package/dist/tile-CvN_LyVr.js +11 -0
- package/dist/tokeniser/BaseTokeniser.d.ts +27 -0
- package/dist/tokeniser/BaseTokeniser.js +94 -0
- package/dist/tokeniser/CharTokeniser.d.ts +4 -3
- package/dist/tokeniser/CharTokeniser.js +46 -32
- package/dist/tokeniser/bpe.d.ts +4 -3
- package/dist/tokeniser/bpe.js +60 -45
- package/dist/tokeniser/type.d.ts +11 -0
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.d.ts +2 -2
- package/dist/training/DatasetBuilder.js +32 -36
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.d.ts +3 -3
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +5 -5
- package/dist/transpose-JawVKyZy.js +36 -0
- package/dist/unsorted_segment_sum-LAbmE9G4.js +277 -0
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +1 -4
- package/dist/utilities/packed.js +10 -745
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-DzfrwYuP.js → variable-DQ9yYgEU.js} +1 -1
- package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-CAE4RICo.js} +177 -171
- package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-BdovYhXr.js} +34 -35
- package/dist/zeros-DeiE2zTa.js +13 -0
- package/dist/zeros_like-BAz3iKru.js +721 -0
- package/package.json +4 -2
- package/dist/Reshape-CDVLyVfz.js +0 -16
- package/dist/broadcast_to-B0ChcDaz.js +0 -30
- package/dist/complex-BBiRlsVq.js +0 -13
- package/dist/concat-DmBLPVGC.js +0 -19
- package/dist/dropout-B1x1kYMa.js +0 -99
- package/dist/expand_dims-ouvfxQ1n.js +0 -13
- package/dist/gather-CH9sdacz.js +0 -10
- package/dist/index-D6Q1lPZO.js +0 -2157
- package/dist/log_sum_exp-D3ftBNY5.js +0 -41
- package/dist/mat_mul-C59XWcJd.js +0 -12
- package/dist/mod-DESSvHIU.js +0 -12
- package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
- package/dist/ones-jU9jlQvM.js +0 -15
- package/dist/ops-BFDtP6th.js +0 -645
- package/dist/pack16-CmVZs6af.js +0 -41
- package/dist/patches/PackedTensor.d.ts +0 -12
- package/dist/patches/PackedTensor.js +0 -11
- package/dist/patches/engine.d.ts +0 -261
- package/dist/patches/engine.js +0 -12
- package/dist/patches/tape.d.ts +0 -12
- package/dist/patches/tape.js +0 -5
- package/dist/range-ZZZD60Fx.js +0 -11
- package/dist/reciprocal-CrYlsAGD.js +0 -10
- package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
- package/dist/relu-BYDneVPn.js +0 -10
- package/dist/reshape-CaPQzFvz.js +0 -10
- package/dist/rope-s4W2XO9B.js +0 -32
- package/dist/selu_util-BGPXmd4B.js +0 -303
- package/dist/sin-Djs4aQiu.js +0 -16
- package/dist/slice-DvovR5wq.js +0 -13
- package/dist/split-DBck65sX.js +0 -10
- package/dist/squeeze-C00Ipm_7.js +0 -11
- package/dist/stack-ChnHwRpX.js +0 -13
- package/dist/sum-ywRJj3Zr.js +0 -12
- package/dist/tensor-CzmOBsdf.js +0 -909
- package/dist/tensor1d-BlUT89BP.js +0 -12
- package/dist/tensor_util-DfwaWayG.js +0 -523
- package/dist/tile-CR074jmp.js +0 -13
- package/dist/transpose-DH4gmHvu.js +0 -38
- package/dist/zeros-DBFVbpv5.js +0 -14
package/dist/ops/grads/matMulGelu.js
CHANGED
@@ -1,18 +1,17 @@
-import { e as
-
-
-  return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
+import { j as a, e as o } from "../../index-D0RBWjq8.js";
+function s(e, n, r) {
+  return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
 }
-const
+const d = {
   kernelName: "MatMulGelu",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
-  gradFunc: (e,
-  const [
+  gradFunc: (e, n) => {
+    const [r, t] = n, [u, l] = s(e, r, t);
     return {
       x: () => u,
-      kernel: () =>
+      kernel: () => l
     };
   }
 };
-
+a(d);
package/dist/ops/grads/normRMS.js
CHANGED
@@ -1,21 +1,20 @@
-import {
-
-
-  return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
+import { j as t, e as g } from "../../index-D0RBWjq8.js";
+function s(r, a, n) {
+  return g().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
 }
-const
+const u = {
   kernelName: "RMSNorm",
   inputsToSave: ["x", "gamma"],
   outputsToSave: [],
   gradFunc: (r, a) => {
-    const [
+    const [n, e] = a, [m, o] = s(r, n, e);
     return {
-      x: () =>
-      gamma: () =>
+      x: () => m,
+      gamma: () => o
     };
   }
 };
-
+t(u);
 export {
-
+  u as normRMSGradConfig
 };
package/dist/ops/grads/pack16.js
CHANGED
@@ -1,7 +1,6 @@
-import "../../index-
-import { b as
-import "../../slice-
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { b as i } from "../../pack16-k4jq6aMX.js";
+import "../../slice-Aqy7KbJh.js";
 export {
-
+  i as packGradConfig
 };
package/dist/ops/grads/qkv.js
CHANGED
@@ -1,36 +1,34 @@
-import "../../index-
-import { a as
-import { concat16 as
-import { sum16 as
-import {
-
-import { s as G } from "../../squeeze-C00Ipm_7.js";
-const m = {
+import { j as u } from "../../index-D0RBWjq8.js";
+import { a as f } from "../../matMul16-cDxwemKj.js";
+import { concat16 as g } from "../concat16.js";
+import { sum16 as l } from "../sum16.js";
+import { s as k } from "../../squeeze--YMgaAAf.js";
+const i = {
   kernelName: "QKV",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
   gradFunc: (e, s) => {
-    const [
-
-    const
+    const [r, n, t] = e, [a] = s, p = g([r, n, t], 1);
+    r.dispose(), n.dispose(), t.dispose();
+    const m = [a.shape[0], a.shape[1], 3 * a.shape[2]], d = f.gradFunc(p, s, {
       transposeA: !1,
       transposeB: !1,
-      originalShape:
+      originalShape: m,
       perm: [0, 2, 1, 3]
     });
     return p.dispose(), {
-      x: () =>
+      x: () => d.A(),
       kernel: () => {
-        const
-        return
+        const o = d.B(), c = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
+        return o.dispose(), c;
       }
     };
   }
 };
-function
-  return
+function B(e, s, r) {
+  return i.gradFunc(e, [s, r], {});
 }
-
+u(i);
 export {
-
+  B as qkvGrad
 };
package/dist/ops/grads/rope.js
CHANGED
@@ -1,7 +1,5 @@
-import "../../
-import "../../
-import { a as t } from "../../rope-s4W2XO9B.js";
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { a as p } from "../../rope-BmZmp9uP.js";
 export {
-
+  p as ropeGradConfig
 };
package/dist/ops/grads/softmax16.js
CHANGED
@@ -1,8 +1,7 @@
-import {
+import { j as n, e as a } from "../../index-D0RBWjq8.js";
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { a } from "../../tensor_util-DfwaWayG.js";
 function s(r, e) {
-  return
+  return a().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
 }
 const i = {
   kernelName: "Softmax16",
@@ -20,7 +19,7 @@ const i = {
     };
   }
 };
-
+n(i);
 export {
   i as softmax16GradConfig
 };
package/dist/ops/grads/unpack16.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-
-import { a as
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-D0RBWjq8.js";
+import { a as p } from "../../pack16-k4jq6aMX.js";
 export {
-
+  p as unpackGradConfig
 };
package/dist/ops/grads/utils.js
CHANGED
@@ -1,10 +1,14 @@
-function
+function e(t) {
   return t.dtype = "float32", t;
 }
-function
+function n(t) {
   return t.dtype = "int32", t;
 }
+function r(t) {
+  return t.dtype = "packedF16", t;
+}
 export {
-
-
+  e as forceFloat,
+  n as forceInt,
+  r as forcePacked
 };
package/dist/ops/matMul16.js
CHANGED
@@ -1,9 +1,9 @@
-import "../index-
-import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-
+import "../index-D0RBWjq8.js";
+import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-cDxwemKj.js";
 import "./webgl/matMul16.js";
 import "./cpu/matMul16.js";
 import "../utilities/packed.js";
-import "../pack16-
+import "../pack16-k4jq6aMX.js";
 export {
   p as matMul16,
   u as matMul16Gelu,
|
package/dist/ops/matMulGelu.js
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
|
-
import { e as u } from "../index-
|
|
1
|
+
import { e as u } from "../index-D0RBWjq8.js";
|
|
2
2
|
import "./cpu/matMulGelu.js";
|
|
3
|
-
import "
|
|
3
|
+
import "../matMulGelu-B2s_80-H.js";
|
|
4
4
|
import "./grads/matMulGelu.js";
|
|
5
5
|
function M(r, e) {
|
|
6
6
|
return u().runKernel("MatMulGelu", { x: r, kernel: e });
|
package/dist/ops/matMulMul.js
CHANGED
package/dist/ops/mul16.js
CHANGED
@@ -1,4 +1,4 @@
-import { m as t, e as u } from "../index-
+import { m as t, e as u } from "../index-D0RBWjq8.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function i(r, e) {
   return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
package/dist/ops/mulDrop.js
CHANGED
package/dist/ops/normRMS.js
CHANGED
package/dist/ops/pack16.js
CHANGED
package/dist/ops/qkv.js
CHANGED
@@ -1,14 +1,10 @@
-import { e as
+import { e as t } from "../index-D0RBWjq8.js";
 import "./cpu/qkv.js";
 import "./webgl/qkv.js";
 import "./grads/qkv.js";
-
-
-  const o = m().runKernel("QKV", { x: n, kernel: t }, { heads: e, packed: r });
-  return r && o.forEach((i) => {
-    f(i);
-  }), o;
+function u(r, e, n, o = !1) {
+  return t().runKernel("QKV", { x: r, kernel: e }, { heads: n, packed: o });
 }
 export {
-
+  u as qkv
 };
package/dist/ops/reshape16.js
CHANGED
@@ -1,43 +1,41 @@
-import { e as
-import {
-
-import { a as l, r as t } from "../tensor_util-DfwaWayG.js";
-const m = {
+import { j as p, h as s, e as u } from "../index-D0RBWjq8.js";
+import { r as c } from "../reshape-CkjKPPqB.js";
+const i = {
   kernelName: "Reshape16",
   inputsToSave: ["x"],
   gradFunc: (e, r) => {
     const [n] = r;
     if (Array.isArray(e))
       throw new Error("Reshape16 gradient does not support multiple outputs.");
-    return { x: () =>
+    return { x: () => m(e, n.shape) };
   }
 };
-
+p(i);
 function a(e) {
-  const { inputs: r, attrs: n } = e, { x:
-  return
+  const { inputs: r, attrs: n } = e, { x: t } = r, { shape: o } = n;
+  return c(t, o);
 }
-const
+const l = {
   kernelName: "Reshape16",
   backendName: "webgpu",
   kernelFunc: a
 };
-
-const
+s(l);
+const h = {
   kernelName: "Reshape16",
   backendName: "webgl",
   kernelFunc: a
 };
-
-const
+s(h);
+const g = {
   kernelName: "Reshape16",
   backendName: "cpu",
   kernelFunc: a
 };
-
-function
-  return
+s(g);
+function m(e, r) {
+  return u().runKernel("Reshape16", { x: e }, { shape: r });
 }
 export {
-
+  m as reshape16
 };
package/dist/ops/rope.d.ts
CHANGED
package/dist/ops/rope.js
CHANGED
@@ -1,12 +1,7 @@
-import "../index-
-import "../random_width-BVV9HveY.js";
-import "../register_all_kernels-nvj2k7OC.js";
-import "../index-Cp39cXWe.js";
-import "../dataset-D2P7rHAw.js";
+import "../index-D0RBWjq8.js";
 import "./cpu/rope.js";
 import "./webgl/rope.js";
-import { r as
-import "../utilities/packed.js";
+import { r as i } from "../rope-BmZmp9uP.js";
 export {
-
+  i as rope
 };
package/dist/ops/scatterSub.js
CHANGED
package/dist/ops/slice16.js
CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as n } from "../utilities/packed.js";
-import { e as c } from "../index-
-import { s as i } from "../slice-
+import { e as c } from "../index-D0RBWjq8.js";
+import { s as i } from "../slice-Aqy7KbJh.js";
 function a(r, e, o) {
   return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
 }
package/dist/ops/softmax16.js
CHANGED
@@ -1,12 +1,9 @@
-import { e } from "../index-
+import { e as n } from "../index-D0RBWjq8.js";
 import "./grads/softmax16.js";
-import { isPackedTensor as
-function
-
-    return e().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
-  const n = e().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 });
-  return m(r) ? a(n) : n;
+import { isPackedTensor as e } from "../utilities/packed.js";
+function t(r) {
+  return e(r) ? n().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 }) : n().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
 }
 export {
-
+  t as softmax16
 };
package/dist/ops/sub16.js
CHANGED
@@ -1,4 +1,4 @@
-import { c as s, e as t } from "../index-
+import { c as s, e as t } from "../index-D0RBWjq8.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function c(r, e) {
   return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
package/dist/ops/sum16.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as t } from "../index-
+import { e as t } from "../index-D0RBWjq8.js";
 import { isPackedTensor as s } from "../utilities/packed.js";
-import { s as n } from "../sum-
+import { s as n } from "../sum-BdplSvq_.js";
 function p(r, o, e = !1) {
   if (!s(r))
     return n(r, o, e);
package/dist/ops/transpose16.js
CHANGED
@@ -1,41 +1,40 @@
-import {
-import {
-import { g
-import { isPackedTensor as f
-import { t as a } from "../transpose-
-
-const k = {
+import { j as i, h as p, e as u } from "../index-D0RBWjq8.js";
+import { forcePacked as l, forceFloat as m } from "./grads/utils.js";
+import { g } from "../axis_util-DofAuy0p.js";
+import { isPackedTensor as f } from "../utilities/packed.js";
+import { t as a } from "../transpose-JawVKyZy.js";
+const d = {
   kernelName: "Transpose16",
-  gradFunc: (
-    if (Array.isArray(
+  gradFunc: (r, s, t) => {
+    if (Array.isArray(r))
       throw new Error("Transpose16 gradient does not support multiple outputs.");
-    const n =
-    return { x: () =>
+    const n = t, { perm: e } = n, o = g(e);
+    return { x: () => T(r, o) };
   }
 };
-d
-function c(
-  const { inputs: s, attrs:
-  if (
+i(d);
+function c(r) {
+  const { inputs: s, attrs: t } = r, { x: n } = s, { perm: e } = t, o = f(n);
+  if (o && e[e.length - 1] !== n.shape.length - 1)
     throw new Error("Transpose16 currently only supports the last axis being unchanged.");
-  return
+  return o ? l(a(m(n), e)) : a(n, e);
 }
-const
+const k = {
   kernelName: "Transpose16",
   backendName: "webgl",
   kernelFunc: c
 };
-p(
-const
+p(k);
+const h = {
   kernelName: "Transpose16",
   backendName: "cpu",
   kernelFunc: c
 };
-p(
-function
-  return s == null && (s =
+p(h);
+function T(r, s) {
+  return s == null && (s = r.shape.map((n, e) => e).reverse()), u().runKernel("Transpose16", { x: r }, { perm: s });
 }
 export {
-
-
+  T as transpose16,
+  d as transpose16GradConfig
 };
package/dist/ops/unpack16.js
CHANGED
package/dist/ops/webgl/adamAdjust.js
CHANGED
@@ -1,6 +1,5 @@
-import { r as n } from "../../Reshape-
-import "../../index-
-import { r as f } from "../../tensor_util-DfwaWayG.js";
+import { r as n } from "../../Reshape-Ct266DEk.js";
+import { h as f } from "../../index-D0RBWjq8.js";
 class v {
   variableNames = ["moments", "value"];
   outputShape;
package/dist/ops/webgl/attentionMask.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-
-
-class h {
+import { h } from "../../index-D0RBWjq8.js";
+class m {
   variableNames = ["q", "k"];
   outputShape;
   userCode;
@@ -35,12 +34,12 @@ class h {
   }
 }
 function l(o) {
-  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3],
-  return a.runWebGLProgram(
+  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new m(i, u, r, c, p);
+  return a.runWebGLProgram(d, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
 }
 const f = {
   kernelName: "AttentionMask",
   backendName: "webgl",
   kernelFunc: l
 };
-
+h(f);
package/dist/ops/webgl/fusedSoftmax.js
CHANGED
@@ -1,9 +1,7 @@
-import { m as b, s as I, r as k } from "../../RealDiv-
-import { r as v } from "../../Reshape-
-import "../../index-
-import {
-import { p as P } from "../../tensor-CzmOBsdf.js";
-import { e as S } from "../../axis_util-BsIr9ZNu.js";
+import { m as b, s as I, r as k } from "../../RealDiv-Ds-jvL09.js";
+import { r as v } from "../../Reshape-Ct266DEk.js";
+import { h as w, af as P } from "../../index-D0RBWjq8.js";
+import { e as S } from "../../axis_util-DofAuy0p.js";
 class T {
   variableNames = ["logits", "maxLogits"];
   outputShape;
@@ -62,11 +60,11 @@ function L(r) {
   o.disposeIntermediateTensorInfo(d);
   const p = I({ inputs: { x: s }, backend: o, attrs: { axis: i, keepDims: !1 } }), a = v({ inputs: { x: p }, backend: o, attrs: { shape: f } });
   if (n !== void 0 && n > 0) {
-    const
+    const h = new C(e.shape), g = o.runWebGLProgram(h, [s, a], "float32", [
       [n],
       [c ?? Math.random() * 1e4]
     ]);
-    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a),
+    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), g;
   }
   const x = k({ inputs: { a: s, b: a }, backend: o });
   return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), x;
package/dist/ops/webgl/gatherSub.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-
-
-class l {
+import { h as l } from "../../index-D0RBWjq8.js";
+class u {
   variableNames = ["labels", "logits", "values"];
   outputShape;
   userCode;
@@ -16,13 +15,13 @@ class l {
   `;
   }
 }
-function
-  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new
+function i(t) {
+  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new u(r);
   return a.runWebGLProgram(n, [o, e, s], "float32");
 }
 const c = {
   kernelName: "EfficientGatherSub",
   backendName: "webgl",
-  kernelFunc:
+  kernelFunc: i
 };
-
+l(c);
package/dist/ops/webgl/gelu.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-
-import { u as s, C as i } from "../../kernel_funcs_utils-
-import { r as a } from "../../tensor_util-DfwaWayG.js";
+import { h as a } from "../../index-D0RBWjq8.js";
+import { u as s, C as i } from "../../kernel_funcs_utils-CSaumNDs.js";
 const t = 0.7978845608028654, r = 0.044715, c = i + `
   float x3 = x * x * x;
   float inner = x + ${r} * x3;
|