@genai-fi/nanogpt 0.10.2 → 0.10.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.js +11761 -171
- package/dist/{RealDiv-zz7FpkKX.js → RealDiv-KAPDe8zB.js} +23 -25
- package/dist/Reshape-BYkmUnAv.js +14 -0
- package/dist/{Reshape-CHdUjC72.js → Reshape-Zt6eb7yh.js} +18 -20
- package/dist/TeachableLLM.js +10 -11
- package/dist/{axis_util-BsIr9ZNu.js → axis_util-BaG7mf5A.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-B1XRLuq9.js → backend_util-RCe-rHaj.js} +72 -73
- package/dist/{backend_webgpu-CqpfEImu.js → backend_webgpu-DE3ACOLx.js} +45 -47
- package/dist/broadcast_to-B3eYlZm7.js +28 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +7 -11
- package/dist/checks/normRMS.js +9 -9
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +12 -13
- package/dist/checks/rope.js +2 -2
- package/dist/clip_by_value-BnO7-a88.js +12 -0
- package/dist/complex-DjxcVmoX.js +11 -0
- package/dist/concat-BV8bt5H-.js +17 -0
- package/dist/{concat_util-iBYIyuQe.js → concat_util-DpW8mL_l.js} +1 -1
- package/dist/{dataset-D2P7rHAw.js → dataset-BcwmTGYc.js} +137 -139
- package/dist/dropout-BcvN9JYi.js +92 -0
- package/dist/expand_dims-DT4tEPwA.js +11 -0
- package/dist/{exports_initializers-CZSUJoVE.js → exports_initializers-Hta_rEnm.js} +1 -1
- package/dist/floor-D5QdR_le.js +9 -0
- package/dist/gather-D3JcZUaI.js +9 -0
- package/dist/{gelu-Bmhopi0J.js → gelu-CjNPL4OH.js} +10 -11
- package/dist/{gpgpu_math-DsCcikas.js → gpgpu_math-DAOmgtXR.js} +841 -1015
- package/dist/{index-DRyE072i.js → index-BwexR4lA.js} +262 -263
- package/dist/index-DOvlwCh-.js +3520 -0
- package/dist/{kernel_funcs_utils-CWfOAPGO.js → kernel_funcs_utils-CCzYdUZg.js} +130 -132
- package/dist/layers/BaseLayer.js +15 -16
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +7 -7
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +9 -9
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +13 -14
- package/dist/log_sum_exp-ngO0-4pK.js +39 -0
- package/dist/main.js +49 -50
- package/dist/{matMul16-fEAJ4smh.js → matMul16-BWRSOCWB.js} +14 -15
- package/dist/matMulGelu-CzfgT6Wq.js +163 -0
- package/dist/mat_mul-SjpJRLyL.js +11 -0
- package/dist/mod-AnXEvvpo.js +11 -0
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +13 -14
- package/dist/ones-D2rT0xk2.js +14 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +13 -14
- package/dist/ops/cpu/adamMoments.js +6 -7
- package/dist/ops/cpu/appendCache.js +7 -8
- package/dist/ops/cpu/attentionMask.js +7 -7
- package/dist/ops/cpu/fusedSoftmax.js +10 -11
- package/dist/ops/cpu/gatherSub.js +9 -10
- package/dist/ops/cpu/gelu.js +9 -10
- package/dist/ops/cpu/matMul16.js +6 -7
- package/dist/ops/cpu/matMulGelu.js +5 -6
- package/dist/ops/cpu/matMulMul.js +3 -4
- package/dist/ops/cpu/mulDropout.js +3 -4
- package/dist/ops/cpu/normRMS.js +10 -11
- package/dist/ops/cpu/qkv.js +8 -9
- package/dist/ops/cpu/rope.js +5 -6
- package/dist/ops/cpu/scatterSub.js +17 -19
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +11 -12
- package/dist/ops/grads/attentionMask.js +5 -6
- package/dist/ops/grads/gelu.js +3 -4
- package/dist/ops/grads/matMul16.js +4 -5
- package/dist/ops/grads/matMulGelu.js +9 -10
- package/dist/ops/grads/normRMS.js +7 -8
- package/dist/ops/grads/pack16.js +4 -5
- package/dist/ops/grads/qkv.js +17 -19
- package/dist/ops/grads/rope.js +3 -5
- package/dist/ops/grads/softmax16.js +3 -4
- package/dist/ops/grads/unpack16.js +3 -4
- package/dist/ops/grads/utils.d.ts +1 -0
- package/dist/ops/grads/utils.js +8 -4
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +3 -4
- package/dist/ops/qkv.js +4 -8
- package/dist/ops/reshape16.js +14 -16
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +3 -8
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +5 -8
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +23 -24
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -3
- package/dist/ops/webgl/adamMoments.js +1 -2
- package/dist/ops/webgl/appendCache.js +1 -2
- package/dist/ops/webgl/attentionMask.js +4 -5
- package/dist/ops/webgl/fusedSoftmax.js +4 -6
- package/dist/ops/webgl/gatherSub.js +6 -7
- package/dist/ops/webgl/gelu.js +2 -3
- package/dist/ops/webgl/log.js +11 -12
- package/dist/ops/webgl/matMul16.js +10 -11
- package/dist/ops/webgl/matMulGelu.js +7 -111
- package/dist/ops/webgl/matMulMul.js +9 -10
- package/dist/ops/webgl/mulDropout.js +8 -9
- package/dist/ops/webgl/normRMS.js +2 -3
- package/dist/ops/webgl/qkv.js +5 -6
- package/dist/ops/webgl/rope.js +7 -8
- package/dist/ops/webgl/scatterSub.js +5 -6
- package/dist/ops/webgpu/adamAdjust.js +10 -12
- package/dist/ops/webgpu/adamMoments.js +8 -10
- package/dist/ops/webgpu/add16.js +8 -9
- package/dist/ops/webgpu/appendCache.js +23 -25
- package/dist/ops/webgpu/attentionMask.js +8 -10
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +12 -14
- package/dist/ops/webgpu/gatherSub.js +11 -13
- package/dist/ops/webgpu/gelu.js +28 -29
- package/dist/ops/webgpu/matMul16.js +26 -28
- package/dist/ops/webgpu/matMul16_program.js +4 -5
- package/dist/ops/webgpu/mul16.js +9 -10
- package/dist/ops/webgpu/normRMS.js +15 -17
- package/dist/ops/webgpu/normRMSGrad.js +21 -28
- package/dist/ops/webgpu/pack16.js +12 -13
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +16 -18
- package/dist/ops/webgpu/rope.js +25 -27
- package/dist/ops/webgpu/scatterSub.js +7 -9
- package/dist/ops/webgpu/slice16.js +21 -23
- package/dist/ops/webgpu/softmax16.js +17 -19
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +7 -8
- package/dist/ops/webgpu/sub16.js +7 -8
- package/dist/ops/webgpu/sum16.js +18 -20
- package/dist/ops/webgpu/transpose16.js +19 -20
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
- package/dist/ops/webgpu/unpack16.js +3 -4
- package/dist/ops/webgpu/utils/binary_op.js +7 -8
- package/dist/ops/webgpu/utils/reductions.js +14 -22
- package/dist/ops-B5yanEdW.js +476 -0
- package/dist/pack16-nQ6JaLo-.js +39 -0
- package/dist/patches/webgpu_backend.js +19 -20
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +21 -22
- package/dist/{random_width-BVV9HveY.js → random_width-or-CEftb.js} +2506 -2761
- package/dist/range-BklejeeW.js +10 -0
- package/dist/relu-CP0ZcxWO.js +9 -0
- package/dist/reshape-ByE68wS9.js +9 -0
- package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
- package/dist/rope-Ir4mTyD1.js +24 -0
- package/dist/{scatter_nd_util-C7zXRT_h.js → scatter_nd_util-lvSiX8q4.js} +1 -1
- package/dist/selu_util-kbhpTdYD.js +44 -0
- package/dist/{shared-CHhxz-O5.js → shared-DT1TkE6w.js} +1 -1
- package/dist/{shared-D2NP_CpY.js → shared-dntlHIDQ.js} +343 -345
- package/dist/slice-BfEGSH82.js +12 -0
- package/dist/{slice_util-DyjSAD0u.js → slice_util-uTKwiEpW.js} +1 -1
- package/dist/{softmax-C9JQEtnO.js → softmax-CA5jFsLR.js} +4 -5
- package/dist/split-CVLc0w--.js +9 -0
- package/dist/squeeze-C7Z2srUo.js +10 -0
- package/dist/stack-Cf4n9h0N.js +11 -0
- package/dist/step-CINUs5QB.js +261 -0
- package/dist/sum-DWAtNGez.js +11 -0
- package/dist/tensor-DJoc7gJU.js +8 -0
- package/dist/tensor1d-D11P_7Dp.js +11 -0
- package/dist/{tensor2d-CSB4KOb0.js → tensor2d-Bs9wZRc7.js} +6 -7
- package/dist/{tensor4d-D7bLqGqz.js → tensor4d-BARPdTaS.js} +6 -7
- package/dist/{tfjs_backend-CNkSTL0c.js → tfjs_backend-y1cvNhLA.js} +255 -264
- package/dist/tile-mbfagpsB.js +11 -0
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.js +2 -2
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +5 -5
- package/dist/transpose-ClWiBS_b.js +36 -0
- package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +1 -4
- package/dist/utilities/packed.js +10 -745
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-DzfrwYuP.js → variable-WawDEaAb.js} +1 -1
- package/dist/{webgpu_program-DzaQiqel.js → webgpu_program-DuOXPQol.js} +178 -172
- package/dist/{webgpu_util-0_ubCEHJ.js → webgpu_util-RxEF33Rj.js} +34 -35
- package/dist/zeros-KnWaWf-X.js +13 -0
- package/dist/zeros_like-DvE73F4e.js +721 -0
- package/package.json +4 -2
- package/dist/Reshape-CDVLyVfz.js +0 -16
- package/dist/broadcast_to-B0ChcDaz.js +0 -30
- package/dist/complex-BBiRlsVq.js +0 -13
- package/dist/concat-DmBLPVGC.js +0 -19
- package/dist/dropout-B1x1kYMa.js +0 -99
- package/dist/expand_dims-ouvfxQ1n.js +0 -13
- package/dist/gather-CH9sdacz.js +0 -10
- package/dist/index-D6Q1lPZO.js +0 -2157
- package/dist/log_sum_exp-D3ftBNY5.js +0 -41
- package/dist/mat_mul-C59XWcJd.js +0 -12
- package/dist/mod-DESSvHIU.js +0 -12
- package/dist/mulmat_packed_gpu-Coh6qbJk.js +0 -55
- package/dist/ones-jU9jlQvM.js +0 -15
- package/dist/ops-BFDtP6th.js +0 -645
- package/dist/pack16-CmVZs6af.js +0 -41
- package/dist/patches/PackedTensor.d.ts +0 -12
- package/dist/patches/PackedTensor.js +0 -11
- package/dist/patches/engine.d.ts +0 -261
- package/dist/patches/engine.js +0 -12
- package/dist/patches/tape.d.ts +0 -12
- package/dist/patches/tape.js +0 -5
- package/dist/range-ZZZD60Fx.js +0 -11
- package/dist/reciprocal-CrYlsAGD.js +0 -10
- package/dist/register_all_kernels-nvj2k7OC.js +0 -12307
- package/dist/relu-BYDneVPn.js +0 -10
- package/dist/reshape-CaPQzFvz.js +0 -10
- package/dist/rope-s4W2XO9B.js +0 -32
- package/dist/selu_util-BGPXmd4B.js +0 -303
- package/dist/sin-Djs4aQiu.js +0 -16
- package/dist/slice-DvovR5wq.js +0 -13
- package/dist/split-DBck65sX.js +0 -10
- package/dist/squeeze-C00Ipm_7.js +0 -11
- package/dist/stack-ChnHwRpX.js +0 -13
- package/dist/sum-ywRJj3Zr.js +0 -12
- package/dist/tensor-0r5yOo2R.js +0 -8
- package/dist/tensor-CzmOBsdf.js +0 -909
- package/dist/tensor1d-BlUT89BP.js +0 -12
- package/dist/tensor_util-DfwaWayG.js +0 -523
- package/dist/tile-CR074jmp.js +0 -13
- package/dist/transpose-DH4gmHvu.js +0 -38
- package/dist/zeros-DBFVbpv5.js +0 -14
package/dist/ops/grads/qkv.js
CHANGED
@@ -1,36 +1,34 @@
-import "../../index-…
-import { a as …
-import { concat16 as …
-import { sum16 as …
-import { …
-…
-import { s as G } from "../../squeeze-C00Ipm_7.js";
-const m = {
+import { u as c } from "../../index-DOvlwCh-.js";
+import { a as f } from "../../matMul16-BWRSOCWB.js";
+import { concat16 as g } from "../concat16.js";
+import { sum16 as l } from "../sum16.js";
+import { s as k } from "../../squeeze-C7Z2srUo.js";
+const i = {
   kernelName: "QKV",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
   gradFunc: (e, s) => {
-    const […
-…
-    const …
+    const [r, n, t] = e, [a] = s, p = g([r, n, t], 1);
+    r.dispose(), n.dispose(), t.dispose();
+    const m = [a.shape[0], a.shape[1], 3 * a.shape[2]], d = f.gradFunc(p, s, {
       transposeA: !1,
       transposeB: !1,
-      originalShape: …
+      originalShape: m,
       perm: [0, 2, 1, 3]
     });
     return p.dispose(), {
-      x: () => …
+      x: () => d.A(),
       kernel: () => {
-        const …
-        return …
+        const o = d.B(), u = o.shape[0] === 1 ? k(o, [0]) : l(o, 0);
+        return o.dispose(), u;
      }
    };
  }
 };
-function …
-  return …
+function B(e, s, r) {
+  return i.gradFunc(e, [s, r], {});
 }
-…
+c(i);
 export {
-…
+  B as qkvGrad
 };
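Reading the rewritten gradient: the three upstream gradients are concatenated along axis 1, pushed through the matMul16 gradient with perm [0, 2, 1, 3], and the kernel gradient is either squeezed (batch of 1) or summed over axis 0. A minimal usage sketch of the exported helper, inferred from the minified signature B(e, s, r); tensor names here are illustrative, not from the package:

  // dQ, dK, dV: upstream gradients for the three QKV outputs;
  // x, kernel: the forward inputs the grad config saves.
  const grads = qkvGrad([dQ, dK, dV], x, kernel);
  const dx = grads.x();      // gradient w.r.t. the input activations
  const dW = grads.kernel(); // gradient w.r.t. the fused QKV weight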
package/dist/ops/grads/rope.js
CHANGED
@@ -1,7 +1,5 @@
-import "../../…
-import "../../…
-import { a as t } from "../../rope-s4W2XO9B.js";
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-DOvlwCh-.js";
+import { a as p } from "../../rope-Ir4mTyD1.js";
 export {
-…
+  p as ropeGradConfig
 };
package/dist/ops/grads/softmax16.js
CHANGED
@@ -1,8 +1,7 @@
-import { …
+import { u as n, e as a } from "../../index-DOvlwCh-.js";
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { a } from "../../tensor_util-DfwaWayG.js";
 function s(r, e) {
-  return …
+  return a().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
 }
 const i = {
   kernelName: "Softmax16",
@@ -20,7 +19,7 @@ const i = {
    };
  }
 };
-…
+n(i);
 export {
   i as softmax16GradConfig
 };
package/dist/ops/grads/unpack16.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-…
-import { a as …
-import "../../tensor_util-DfwaWayG.js";
+import "../../index-DOvlwCh-.js";
+import { a as p } from "../../pack16-nQ6JaLo-.js";
 export {
-…
+  p as unpackGradConfig
 };
package/dist/ops/grads/utils.js
CHANGED
@@ -1,10 +1,14 @@
-function …
+function e(t) {
   return t.dtype = "float32", t;
 }
-function …
+function n(t) {
   return t.dtype = "int32", t;
 }
+function r(t) {
+  return t.dtype = "packedF16", t;
+}
 export {
-…
-…
+  e as forceFloat,
+  n as forceInt,
+  r as forcePacked
 };
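The diff adds forcePacked alongside the existing forceFloat/forceInt helpers: each overwrites a tensor's reported dtype in place and returns the same tensor, with no data copy. A hedged sketch of how the packed ops can use it (tensor name assumed):

  // t's buffer already holds packed f16 pairs; re-tag it for the 16-bit ops:
  const packed = forcePacked(t);    // t.dtype === "packedF16"
  // ...and re-tag back to a plain float view for stock kernels:
  const plain = forceFloat(packed); // t.dtype === "float32", same buffer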
package/dist/ops/matMul16.js
CHANGED
@@ -1,9 +1,9 @@
-import "../index-…
-import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-…
+import "../index-DOvlwCh-.js";
+import { b as p, c as u, d as i, e as s, m as M } from "../matMul16-BWRSOCWB.js";
 import "./webgl/matMul16.js";
 import "./cpu/matMul16.js";
 import "../utilities/packed.js";
-import "../pack16-…
+import "../pack16-nQ6JaLo-.js";
 export {
   p as matMul16,
   u as matMul16Gelu,
package/dist/ops/matMulGelu.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as u } from "../index-…
+import { e as u } from "../index-DOvlwCh-.js";
 import "./cpu/matMulGelu.js";
-import "…
+import "../matMulGelu-CzfgT6Wq.js";
 import "./grads/matMulGelu.js";
 function M(r, e) {
   return u().runKernel("MatMulGelu", { x: r, kernel: e });
package/dist/ops/matMulMul.js
CHANGED
package/dist/ops/mul16.js
CHANGED
@@ -1,4 +1,4 @@
-import { m as t, e as u } from "../index-…
+import { m as t, e as u } from "../index-DOvlwCh-.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function i(r, e) {
   return !n(r) && !n(e) ? t(r, e) : u().runKernel("Mul16", { a: r, b: e });
package/dist/ops/mulDrop.js
CHANGED
package/dist/ops/normRMS.js
CHANGED
package/dist/ops/pack16.js
CHANGED
package/dist/ops/qkv.js
CHANGED
@@ -1,14 +1,10 @@
-import { e as …
+import { e as t } from "../index-DOvlwCh-.js";
 import "./cpu/qkv.js";
 import "./webgl/qkv.js";
 import "./grads/qkv.js";
-…
-…
-  const o = m().runKernel("QKV", { x: n, kernel: t }, { heads: e, packed: r });
-  return r && o.forEach((i) => {
-    f(i);
-  }), o;
+function u(r, e, n, o = !1) {
+  return t().runKernel("QKV", { x: r, kernel: e }, { heads: n, packed: o });
 }
 export {
-…
+  u as qkv
 };
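The rewritten op drops the old per-output forcePacked loop and forwards straight to the QKV kernel. From the minified signature u(r, e, n, o = !1) and the kernel call, the argument order is x, kernel, heads, packed. A hedged usage sketch (names and shapes assumed; the result is presumably the Q/K/V projections, since the old code iterated over multiple outputs):

  // x: input activations, kernel: fused QKV weight, 8 heads, packed outputs.
  const [q, k, v] = qkv(x, kernel, 8, true);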
package/dist/ops/reshape16.js
CHANGED
@@ -1,43 +1,41 @@
-import { e as …
-import { …
-…
-import { a as l, r as t } from "../tensor_util-DfwaWayG.js";
-const m = {
+import { u as p, p as s, e as u } from "../index-DOvlwCh-.js";
+import { r as c } from "../reshape-ByE68wS9.js";
+const i = {
   kernelName: "Reshape16",
   inputsToSave: ["x"],
   gradFunc: (e, r) => {
     const [n] = r;
     if (Array.isArray(e))
       throw new Error("Reshape16 gradient does not support multiple outputs.");
-    return { x: () => …
+    return { x: () => m(e, n.shape) };
  }
 };
-…
+p(i);
 function a(e) {
-  const { inputs: r, attrs: n } = e, { x: …
-  return …
+  const { inputs: r, attrs: n } = e, { x: t } = r, { shape: o } = n;
+  return c(t, o);
 }
-const …
+const l = {
   kernelName: "Reshape16",
   backendName: "webgpu",
   kernelFunc: a
 };
-…
+s(l);
 const g = {
   kernelName: "Reshape16",
   backendName: "webgl",
   kernelFunc: a
 };
-…
+s(g);
 const h = {
   kernelName: "Reshape16",
   backendName: "cpu",
   kernelFunc: a
 };
-…
-function …
-  return …
+s(h);
+function m(e, r) {
+  return u().runKernel("Reshape16", { x: e }, { shape: r });
 }
 export {
-…
+  m as reshape16
 };
package/dist/ops/rope.d.ts
CHANGED
package/dist/ops/rope.js
CHANGED
@@ -1,12 +1,7 @@
-import "../index-…
-import "../random_width-BVV9HveY.js";
-import "../register_all_kernels-nvj2k7OC.js";
-import "../index-Cp39cXWe.js";
-import "../dataset-D2P7rHAw.js";
+import "../index-DOvlwCh-.js";
 import "./cpu/rope.js";
 import "./webgl/rope.js";
-import { r as …
-import "../utilities/packed.js";
+import { r as i } from "../rope-Ir4mTyD1.js";
 export {
-…
+  i as rope
 };
package/dist/ops/scatterSub.js
CHANGED
package/dist/ops/slice16.js
CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as n } from "../utilities/packed.js";
-import { e as c } from "../index-…
-import { s as i } from "../slice-…
+import { e as c } from "../index-DOvlwCh-.js";
+import { s as i } from "../slice-BfEGSH82.js";
 function a(r, e, o) {
   return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
 }
package/dist/ops/softmax16.js
CHANGED
@@ -1,12 +1,9 @@
-import { e } from "../index-…
+import { e as n } from "../index-DOvlwCh-.js";
 import "./grads/softmax16.js";
-import { isPackedTensor as …
-function …
-…
-    return e().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
-  const n = e().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 });
-  return m(r) ? a(n) : n;
+import { isPackedTensor as e } from "../utilities/packed.js";
+function t(r) {
+  return e(r) ? n().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 }) : n().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
 }
 export {
-…
+  t as softmax16
 };
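This is the same isPackedTensor dispatch used by mul16, sub16, slice16 and sum16 above: packed inputs go to the custom 16-bit kernel, plain tensors fall through to the stock op. Sketch (logits tensor assumed):

  // Packed input -> "Softmax16" kernel; plain input -> regular "Softmax".
  // Either way the softmax runs over the last axis (logits.rank - 1).
  const probs = softmax16(logits);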
package/dist/ops/sub16.js
CHANGED
@@ -1,4 +1,4 @@
-import { c as s, e as t } from "../index-…
+import { c as s, e as t } from "../index-DOvlwCh-.js";
 import { isPackedTensor as n } from "../utilities/packed.js";
 function c(r, e) {
   return !n(r) && !n(e) ? s(r, e) : t().runKernel("Sub16", { a: r, b: e });
package/dist/ops/sum16.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as t } from "../index-…
+import { e as t } from "../index-DOvlwCh-.js";
 import { isPackedTensor as s } from "../utilities/packed.js";
-import { s as n } from "../sum-…
+import { s as n } from "../sum-DWAtNGez.js";
 function p(r, o, e = !1) {
   if (!s(r))
     return n(r, o, e);
package/dist/ops/transpose16.js
CHANGED
@@ -1,41 +1,40 @@
-import { …
-import { …
-import { g…
-import { isPackedTensor as f…
-import { t as a } from "../transpose-…
-…
-const k = {
+import { u as i, p, e as u } from "../index-DOvlwCh-.js";
+import { forcePacked as l, forceFloat as m } from "./grads/utils.js";
+import { g } from "../axis_util-BaG7mf5A.js";
+import { isPackedTensor as f } from "../utilities/packed.js";
+import { t as a } from "../transpose-ClWiBS_b.js";
+const d = {
   kernelName: "Transpose16",
-  gradFunc: (…
-    if (Array.isArray(…
+  gradFunc: (r, s, t) => {
+    if (Array.isArray(r))
       throw new Error("Transpose16 gradient does not support multiple outputs.");
-    const n = …
-    return { x: () => …
+    const n = t, { perm: e } = n, o = g(e);
+    return { x: () => T(r, o) };
  }
 };
-d…
-function c(…
-  const { inputs: s, attrs: …
-  if (…
+i(d);
+function c(r) {
+  const { inputs: s, attrs: t } = r, { x: n } = s, { perm: e } = t, o = f(n);
+  if (o && e[e.length - 1] !== n.shape.length - 1)
     throw new Error("Transpose16 currently only supports the last axis being unchanged.");
-  return …
+  return o ? l(a(m(n), e)) : a(n, e);
 }
-const …
+const k = {
   kernelName: "Transpose16",
   backendName: "webgl",
   kernelFunc: c
 };
-p(…
-const …
+p(k);
+const h = {
   kernelName: "Transpose16",
   backendName: "cpu",
   kernelFunc: c
 };
-p(…
-function …
-  return s == null && (s = …
+p(h);
+function T(r, s) {
+  return s == null && (s = r.shape.map((n, e) => e).reverse()), u().runKernel("Transpose16", { x: r }, { perm: s });
 }
 export {
-…
-…
+  T as transpose16,
+  d as transpose16GradConfig
 };
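Two behaviors are visible in the new kernelFunc: a missing perm defaults to fully reversed axes, and a packed tensor may only be transposed if the last axis stays put (the f16 pair sits in the innermost dimension, so moving it would split pairs). Illustrative calls, with shapes assumed:

  // [B, H, S, D] -> [B, S, H, D]: last axis unchanged, fine for packed tensors.
  const y = transpose16(x, [0, 2, 1, 3]);

  // No perm: axes reverse, e.g. [B, S, D] -> [D, S, B].
  // A packed input throws here because the last axis moves.
  const z = transpose16(plainTensor);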
package/dist/ops/unpack16.js
CHANGED
package/dist/ops/webgl/adamAdjust.js
CHANGED
@@ -1,6 +1,5 @@
-import { r as n } from "../../Reshape-…
-import "../../index-…
-import { r as f } from "../../tensor_util-DfwaWayG.js";
+import { r as n } from "../../Reshape-Zt6eb7yh.js";
+import { p as f } from "../../index-DOvlwCh-.js";
 class v {
   variableNames = ["moments", "value"];
   outputShape;
package/dist/ops/webgl/attentionMask.js
CHANGED
@@ -1,5 +1,4 @@
-import "../../index-…
-import { r as d } from "../../tensor_util-DfwaWayG.js";
+import { p as m } from "../../index-DOvlwCh-.js";
 class h {
   variableNames = ["q", "k"];
   outputShape;
@@ -35,12 +34,12 @@ class h {
  }
 }
 function l(o) {
-  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], …
-  return a.runWebGLProgram(…
+  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], d = new h(i, u, r, c, p);
+  return a.runWebGLProgram(d, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
 }
 const f = {
   kernelName: "AttentionMask",
   backendName: "webgl",
   kernelFunc: l
 };
-…
+m(f);
package/dist/ops/webgl/fusedSoftmax.js
CHANGED
@@ -1,9 +1,7 @@
-import { m as b, s as I, r as k } from "../../RealDiv-…
-import { r as v } from "../../Reshape-…
-import "../../index-…
-import { …
-import { p as P } from "../../tensor-CzmOBsdf.js";
-import { e as S } from "../../axis_util-BsIr9ZNu.js";
+import { m as b, s as I, r as k } from "../../RealDiv-KAPDe8zB.js";
+import { r as v } from "../../Reshape-Zt6eb7yh.js";
+import { p as w, af as P } from "../../index-DOvlwCh-.js";
+import { e as S } from "../../axis_util-BaG7mf5A.js";
 class T {
   variableNames = ["logits", "maxLogits"];
   outputShape;
package/dist/ops/webgl/gatherSub.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-…
-…
-class l {
+import { p as l } from "../../index-DOvlwCh-.js";
+class u {
   variableNames = ["labels", "logits", "values"];
   outputShape;
   userCode;
@@ -16,13 +15,13 @@ class l {
   `;
  }
 }
-function …
-  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new …
+function i(t) {
+  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new u(r);
   return a.runWebGLProgram(n, [o, e, s], "float32");
 }
 const c = {
   kernelName: "EfficientGatherSub",
   backendName: "webgl",
-  kernelFunc: …
+  kernelFunc: i
 };
-…
+l(c);
package/dist/ops/webgl/gelu.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-…
-import { u as s, C as i } from "../../kernel_funcs_utils-…
-import { r as a } from "../../tensor_util-DfwaWayG.js";
+import { p as a } from "../../index-DOvlwCh-.js";
+import { u as s, C as i } from "../../kernel_funcs_utils-CCzYdUZg.js";
 const t = 0.7978845608028654, r = 0.044715, c = i + `
   float x3 = x * x * x;
   float inner = x + ${r} * x3;
package/dist/ops/webgl/log.js
CHANGED
@@ -1,10 +1,9 @@
-import "../../index-…
-import { u as …
-import { y as …
-…
-const t = e + `
+import { p as e, ao as r } from "../../index-DOvlwCh-.js";
+import { u as s, l as N } from "../../kernel_funcs_utils-CCzYdUZg.js";
+import { y as l } from "../../shared-DT1TkE6w.js";
+const a = N + `
   return x < 0.0 ? NAN : log(x);
-`, …
+`, t = `
   vec4 result = log(x);
   bvec4 isNaN = isnan(x);
   result.r = isNaN.r ? x.r : (x.r < 0.0 ? NAN : result.r);
@@ -12,13 +11,13 @@ const t = e + `
   result.b = isNaN.b ? x.b : (x.b < 0.0 ? NAN : result.b);
   result.a = isNaN.a ? x.a : (x.a < 0.0 ? NAN : result.a);
   return result;
-`, n = …
-  opSnippet: …
-  packedOpSnippet: …
-  cpuKernelImpl: …
+`, n = s({
+  opSnippet: a,
+  packedOpSnippet: t,
+  cpuKernelImpl: l
 }), o = {
-  kernelName: …
+  kernelName: r,
   backendName: "webgl",
   kernelFunc: n
 };
-…
+e(o);
package/dist/ops/webgl/matMul16.js
CHANGED
@@ -1,17 +1,16 @@
 import { isPackedTensor as k } from "../../utilities/packed.js";
-import { m as M, b as m } from "../../index-…
-import { matMulMul as …
-import { matMulGelu as …
-import { …
-import { …
-import { …
-import { t as h } from "../../transpose-DH4gmHvu.js";
+import { p as g, m as M, b as m } from "../../index-DOvlwCh-.js";
+import { matMulMul as N } from "../matMulMul.js";
+import { matMulGelu as U } from "../matMulGelu.js";
+import { m as G } from "../../mat_mul-SjpJRLyL.js";
+import { r as w } from "../../reshape-ByE68wS9.js";
+import { t as h } from "../../transpose-ClWiBS_b.js";
 function P(p) {
-  const { A: r, B: o } = p.inputs, { transposeA: l, transposeB: c, scale: u, activation: A, scaleA: …
+  const { A: r, B: o } = p.inputs, { transposeA: l, transposeB: c, scale: u, activation: A, scaleA: d, scaleB: f, forceOutputShape: t, perm: n } = p.attrs, B = !k(r), v = !k(o);
   if (B && v) {
-    const a = …
+    const a = d !== void 0 ? M(r, m(d)) : r, i = f !== void 0 ? M(o, m(f)) : o;
     let e;
-    if (u !== void 0 ? e = …
+    if (u !== void 0 ? e = N(a, i, m(u), l, c) : A === "gelu" ? e = U(a, i) : e = G(a, i, l, c), n)
       if (t) {
         const s = w(e, t);
         e.dispose();
@@ -34,4 +33,4 @@ const C = {
   backendName: "webgl",
   kernelFunc: P
 };
-…
+g(C);