@genai-fi/nanogpt 0.9.1 → 0.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +352 -14
- package/dist/Generator.js +69 -78
- package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
- package/dist/Reshape-CF6odzV4.js +16 -0
- package/dist/Reshape-_kILl6tK.js +81 -0
- package/dist/TeachableLLM.js +28 -22
- package/dist/Trainer.d.ts +2 -0
- package/dist/Trainer.js +3 -2
- package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
- package/dist/backend.d.ts +2 -1
- package/dist/backend.js +10 -4
- package/dist/backend_util-D-rUb2ty.js +474 -0
- package/dist/backend_webgpu-B0u2ndUn.js +547 -0
- package/dist/binary_op_util-pKXltfxI.js +192 -0
- package/dist/broadcast_to-CwF7XIeu.js +30 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/check.d.ts +1 -1
- package/dist/checks/check.js +8 -8
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/index.d.ts +2 -0
- package/dist/checks/index.js +7 -5
- package/dist/checks/matMulGelu.js +6 -6
- package/dist/checks/normRMS.js +7 -7
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.d.ts +1 -0
- package/dist/checks/packUnpack.js +18 -0
- package/dist/checks/qkv.js +12 -27
- package/dist/checks/rope.js +2 -2
- package/dist/checks/weights.js +18 -16
- package/dist/complex-CSlYz-2T.js +13 -0
- package/dist/complex_util-Yc1A_gV1.js +55 -0
- package/dist/concat-BHlIJeyT.js +19 -0
- package/dist/concat_util-DcJk7YHS.js +22 -0
- package/dist/data/docx.js +1 -1
- package/dist/data/parquet.js +2 -2
- package/dist/data/pdf.js +1 -1
- package/dist/data/textLoader.js +1 -1
- package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
- package/dist/dropout-C1pM3f11.js +99 -0
- package/dist/expand_dims-BPG4fwBP.js +13 -0
- package/dist/exports_initializers-xuidcwI4.js +7 -0
- package/dist/gather-DykLGqmW.js +10 -0
- package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
- package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
- package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
- package/dist/index-CjOj7j-u.js +7308 -0
- package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
- package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
- package/dist/index-ZyQhjEPo.js +2157 -0
- package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
- package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
- package/dist/layers/BaseLayer.d.ts +1 -0
- package/dist/layers/BaseLayer.js +7 -6
- package/dist/layers/CausalSelfAttention.d.ts +0 -1
- package/dist/layers/CausalSelfAttention.js +56 -55
- package/dist/layers/MLP.js +15 -16
- package/dist/layers/PositionEmbedding.js +5 -14
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.d.ts +2 -0
- package/dist/layers/RoPECache.js +22 -17
- package/dist/layers/TiedEmbedding.js +22 -17
- package/dist/layers/TransformerBlock.js +21 -20
- package/dist/loader/load.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +39 -33
- package/dist/loader/save.js +1 -1
- package/dist/log_sum_exp-DWI-76TI.js +41 -0
- package/dist/main.d.ts +8 -0
- package/dist/main.js +63 -52
- package/dist/matMul16--R5hOwDG.js +77 -0
- package/dist/mat_mul-DeAh4uTH.js +12 -0
- package/dist/mod-Gt1rMB4n.js +12 -0
- package/dist/models/NanoGPTV1.js +40 -31
- package/dist/models/model.d.ts +2 -0
- package/dist/models/model.js +37 -29
- package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
- package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
- package/dist/ones-CAMiP4I2.js +15 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.d.ts +1 -1
- package/dist/ops/adamMoments.js +4 -4
- package/dist/ops/add16.d.ts +2 -0
- package/dist/ops/add16.js +9 -0
- package/dist/ops/appendCache.js +16 -9
- package/dist/ops/attentionMask.js +4 -4
- package/dist/ops/concat16.d.ts +2 -0
- package/dist/ops/concat16.js +9 -0
- package/dist/ops/cpu/adamAdjust.js +14 -13
- package/dist/ops/cpu/adamMoments.js +10 -9
- package/dist/ops/cpu/appendCache.js +9 -8
- package/dist/ops/cpu/attentionMask.js +15 -14
- package/dist/ops/cpu/fusedSoftmax.js +13 -12
- package/dist/ops/cpu/gatherSub.js +9 -24
- package/dist/ops/cpu/gelu.js +13 -12
- package/dist/ops/cpu/matMul16.d.ts +1 -0
- package/dist/ops/cpu/matMul16.js +16 -0
- package/dist/ops/cpu/matMulGelu.js +18 -16
- package/dist/ops/cpu/matMulMul.js +8 -7
- package/dist/ops/cpu/mulDropout.js +4 -3
- package/dist/ops/cpu/normRMS.js +11 -10
- package/dist/ops/cpu/qkv.js +17 -13
- package/dist/ops/cpu/rope.js +23 -22
- package/dist/ops/cpu/scatterSub.js +16 -30
- package/dist/ops/dot16.d.ts +2 -0
- package/dist/ops/dot16.js +42 -0
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.d.ts +1 -0
- package/dist/ops/grads/add16.js +27 -0
- package/dist/ops/grads/attentionMask.js +12 -19
- package/dist/ops/grads/gelu.js +4 -3
- package/dist/ops/grads/matMul16.d.ts +2 -0
- package/dist/ops/grads/matMul16.js +9 -0
- package/dist/ops/grads/matMulGelu.js +8 -7
- package/dist/ops/grads/normRMS.js +8 -7
- package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
- package/dist/ops/grads/pack16.js +7 -0
- package/dist/ops/grads/qkv.d.ts +3 -1
- package/dist/ops/grads/qkv.js +28 -22
- package/dist/ops/grads/rope.d.ts +2 -1
- package/dist/ops/grads/rope.js +6 -13
- package/dist/ops/grads/softmax16.d.ts +2 -0
- package/dist/ops/grads/softmax16.js +26 -0
- package/dist/ops/grads/unpack16.d.ts +2 -0
- package/dist/ops/grads/unpack16.js +6 -0
- package/dist/ops/grads/utils.d.ts +3 -0
- package/dist/ops/grads/utils.js +10 -0
- package/dist/ops/matMul16.d.ts +15 -0
- package/dist/ops/matMul16.js +13 -0
- package/dist/ops/matMulGelu.js +1 -1
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.d.ts +2 -0
- package/dist/ops/mul16.js +8 -0
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.d.ts +2 -0
- package/dist/ops/pack16.js +6 -0
- package/dist/ops/qkv.d.ts +1 -1
- package/dist/ops/qkv.js +8 -4
- package/dist/ops/reshape16.d.ts +2 -0
- package/dist/ops/reshape16.js +43 -0
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +8 -10
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.d.ts +2 -0
- package/dist/ops/slice16.js +9 -0
- package/dist/ops/softmax16.d.ts +2 -0
- package/dist/ops/softmax16.js +12 -0
- package/dist/ops/sub16.d.ts +2 -0
- package/dist/ops/sub16.js +8 -0
- package/dist/ops/sum16.d.ts +2 -0
- package/dist/ops/sum16.js +13 -0
- package/dist/ops/transpose16.d.ts +3 -0
- package/dist/ops/transpose16.js +41 -0
- package/dist/ops/unpack16.d.ts +2 -0
- package/dist/ops/unpack16.js +6 -0
- package/dist/ops/webgl/adamAdjust.js +3 -2
- package/dist/ops/webgl/adamMoments.js +2 -1
- package/dist/ops/webgl/appendCache.js +2 -1
- package/dist/ops/webgl/attentionMask.js +5 -4
- package/dist/ops/webgl/fusedSoftmax.js +6 -4
- package/dist/ops/webgl/gatherSub.js +7 -6
- package/dist/ops/webgl/gelu.js +3 -2
- package/dist/ops/webgl/log.js +12 -27
- package/dist/ops/webgl/matMul16.d.ts +1 -0
- package/dist/ops/webgl/matMul16.js +37 -0
- package/dist/ops/webgl/matMulGelu.js +17 -15
- package/dist/ops/webgl/matMulMul.js +13 -12
- package/dist/ops/webgl/mulDropout.js +9 -8
- package/dist/ops/webgl/normRMS.js +8 -7
- package/dist/ops/webgl/qkv.js +6 -5
- package/dist/ops/webgl/rope.js +11 -10
- package/dist/ops/webgl/scatterSub.js +6 -5
- package/dist/ops/webgpu/adamAdjust.js +12 -10
- package/dist/ops/webgpu/adamMoments.js +27 -22
- package/dist/ops/webgpu/add16.d.ts +1 -0
- package/dist/ops/webgpu/add16.js +14 -0
- package/dist/ops/webgpu/appendCache.js +64 -17
- package/dist/ops/webgpu/attentionMask.js +19 -62
- package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
- package/dist/ops/webgpu/attentionMask32_program.js +54 -0
- package/dist/ops/webgpu/concat16.d.ts +19 -0
- package/dist/ops/webgpu/concat16.js +128 -0
- package/dist/ops/webgpu/gatherSub.js +9 -7
- package/dist/ops/webgpu/gelu.js +78 -31
- package/dist/ops/webgpu/index.js +12 -0
- package/dist/ops/webgpu/matMul16.d.ts +1 -0
- package/dist/ops/webgpu/matMul16.js +58 -0
- package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
- package/dist/ops/webgpu/matMul16_program.js +336 -0
- package/dist/ops/webgpu/mul16.d.ts +1 -0
- package/dist/ops/webgpu/mul16.js +14 -0
- package/dist/ops/webgpu/normRMS.js +21 -40
- package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS16_program.js +24 -0
- package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS32_program.js +24 -0
- package/dist/ops/webgpu/normRMSGrad.js +113 -64
- package/dist/ops/webgpu/pack16.d.ts +1 -0
- package/dist/ops/webgpu/pack16.js +19 -0
- package/dist/ops/webgpu/pack16_program.d.ts +19 -0
- package/dist/ops/webgpu/pack16_program.js +92 -0
- package/dist/ops/webgpu/qkv.js +20 -55
- package/dist/ops/webgpu/rope.js +77 -22
- package/dist/ops/webgpu/scatterSub.js +9 -7
- package/dist/ops/webgpu/slice16.d.ts +7 -0
- package/dist/ops/webgpu/slice16.js +71 -0
- package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
- package/dist/ops/webgpu/softmax16.js +23 -0
- package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
- package/dist/ops/webgpu/softmax16_program.js +73 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
- package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
- package/dist/ops/webgpu/softmax16grad.js +38 -0
- package/dist/ops/webgpu/sub16.d.ts +1 -0
- package/dist/ops/webgpu/sub16.js +14 -0
- package/dist/ops/webgpu/sum16.d.ts +1 -0
- package/dist/ops/webgpu/sum16.js +40 -0
- package/dist/ops/webgpu/transpose16.d.ts +1 -0
- package/dist/ops/webgpu/transpose16.js +35 -0
- package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
- package/dist/ops/webgpu/transpose16_program.js +50 -0
- package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
- package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
- package/dist/ops/webgpu/unpack16.d.ts +1 -0
- package/dist/ops/webgpu/unpack16.js +49 -0
- package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
- package/dist/ops/webgpu/utils/binary_op.js +79 -0
- package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
- package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
- package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
- package/dist/ops/webgpu/utils/reductions.js +236 -45
- package/dist/ops-CNI3TwqM.js +645 -0
- package/dist/pack16-CFUqumar.js +41 -0
- package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
- package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
- package/dist/patches/PackedTensor.d.ts +12 -0
- package/dist/patches/PackedTensor.js +11 -0
- package/dist/patches/engine.d.ts +261 -0
- package/dist/patches/engine.js +10 -0
- package/dist/patches/tape.d.ts +12 -0
- package/dist/patches/tape.js +5 -0
- package/dist/patches/webgpu_backend.d.ts +18 -0
- package/dist/patches/webgpu_backend.js +57 -0
- package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
- package/dist/patches/webgpu_base.js +34 -0
- package/dist/patches/webgpu_program.d.ts +36 -0
- package/dist/patches/webgpu_program.js +401 -0
- package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
- package/dist/random_width-DY6Kk2Dl.js +10051 -0
- package/dist/range-BMS52eQi.js +11 -0
- package/dist/reciprocal-CTmshQ9J.js +10 -0
- package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
- package/dist/relu-yZ2-7WxU.js +10 -0
- package/dist/reshape-DevtBWtf.js +10 -0
- package/dist/rope-B5UUMsPi.js +32 -0
- package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
- package/dist/selu_util-D1w6yyTO.js +303 -0
- package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
- package/dist/shared-BuAXb4CI.js +2145 -0
- package/dist/sin-BGfy2HZo.js +16 -0
- package/dist/slice-D_gkkqZK.js +13 -0
- package/dist/slice_util-DtEldBfK.js +261 -0
- package/dist/softmax-ZHVebtR1.js +13 -0
- package/dist/split-DrfihRpZ.js +10 -0
- package/dist/squeeze-DZEpeblb.js +11 -0
- package/dist/stack-yOIAalTq.js +13 -0
- package/dist/sum-_fzj5ZTB.js +12 -0
- package/dist/tensor-DdQUJZlz.js +909 -0
- package/dist/tensor-f35l8Odg.js +8 -0
- package/dist/tensor1d-CeZuc-Rv.js +12 -0
- package/dist/tensor2d-G4Ys2GxX.js +15 -0
- package/dist/tensor4d-B8roDgtc.js +15 -0
- package/dist/tensor_util-DV-FP5Q3.js +523 -0
- package/dist/tfjs_backend-kNyO5L2d.js +653 -0
- package/dist/tile-BzyEiF-F.js +13 -0
- package/dist/tokeniser/CharTokeniser.js +1 -1
- package/dist/tokeniser/bpe.js +1 -1
- package/dist/training/Adam.d.ts +2 -1
- package/dist/training/Adam.js +12 -28
- package/dist/training/AdamExt.d.ts +1 -0
- package/dist/training/AdamExt.js +2 -2
- package/dist/training/DatasetBuilder.js +3 -20
- package/dist/training/FullTrainer.js +55 -48
- package/dist/training/Trainer.d.ts +11 -6
- package/dist/training/Trainer.js +51 -39
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/transpose-DKELTqhe.js +38 -0
- package/dist/utilities/arrayClose.js +7 -7
- package/dist/utilities/dummy.js +35 -27
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +7 -0
- package/dist/utilities/packed.js +716 -0
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +5 -0
- package/dist/utilities/sentences.js +41 -0
- package/dist/utilities/weights.js +2 -2
- package/dist/variable-Bhn5bHYv.js +7 -0
- package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
- package/dist/webgpu_util-BBCnKm2X.js +65 -0
- package/dist/zeros-2gldETuK.js +14 -0
- package/package.json +4 -3
- package/dist/Reshape-Bowtk9BP.js +0 -127
- package/dist/Reshape-DUqYftGC.js +0 -30
- package/dist/backend_util-CJIiDoV1.js +0 -749
- package/dist/broadcast_to-DzlNweb8.js +0 -44
- package/dist/concat-B912vBbo.js +0 -33
- package/dist/dropout-C-csYCLj.js +0 -193
- package/dist/exports_initializers-B8iZMgQ0.js +0 -16
- package/dist/gather-Dnpgw-YQ.js +0 -25
- package/dist/index-BzFyqcy-.js +0 -4457
- package/dist/index-C1rx_Ajs.js +0 -12076
- package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
- package/dist/log_sum_exp-DO6z8tSE.js +0 -103
- package/dist/mat_mul-DzjTFx-u.js +0 -27
- package/dist/mod-Dobti4j4.js +0 -27
- package/dist/ones-tIJeHlq-.js +0 -29
- package/dist/ops/fusedSoftmax.d.ts +0 -2
- package/dist/ops/fusedSoftmax.js +0 -10
- package/dist/ops/grads/fusedSoftmax.js +0 -22
- package/dist/ops-LuCMAnmM.js +0 -1525
- package/dist/random_width-CXVRloNK.js +0 -13670
- package/dist/range-CWcz7xFA.js +0 -26
- package/dist/reciprocal-C4rNcM-S.js +0 -25
- package/dist/relu-BjCh_SYb.js +0 -25
- package/dist/reshape-CnIwVG1c.js +0 -25
- package/dist/selu_util-OtRzVwW5.js +0 -719
- package/dist/shared-DmRsFyaJ.js +0 -3134
- package/dist/sin-gpDNRxE0.js +0 -47
- package/dist/slice-d0Vo9XTN.js +0 -28
- package/dist/softmax-D7Jj3p_P.js +0 -28
- package/dist/split-DK2k5eHf.js +0 -25
- package/dist/stack-DFatutCx.js +0 -27
- package/dist/sum-CJ0ULhmt.js +0 -27
- package/dist/tensor1d-vML0r3q6.js +0 -27
- package/dist/tensor2d-D76QGjF3.js +0 -30
- package/dist/tensor4d-Df1WlVDY.js +0 -30
- package/dist/webgpu_util-pLEV9tks.js +0 -80
- package/dist/zeros-Bj5rMYA7.js +0 -52
package/dist/ops/cpu/gelu.js
CHANGED

```diff
@@ -1,29 +1,30 @@
-import {
+import { t as d } from "../../index-ZyQhjEPo.js";
+import { r } from "../../tensor_util-DV-FP5Q3.js";
 const o = 0.7978845608028654, c = 0.044715;
-function m(
-  const { inputs: u } =
+function m(t) {
+  const { inputs: u } = t, { x: n } = u, e = n;
   return d(() => {
     const l = e.pow(3), s = e.add(l.mul(c)).mul(o).tanh().add(1).mul(0.5);
     return e.mul(s);
   });
 }
-const
+const N = {
   kernelName: "Gelu",
   backendName: "cpu",
   kernelFunc: m
 };
-
+r(N);
 const K = {
   kernelName: "Gelu",
   backendName: "tensorflow",
   kernelFunc: m
 };
-
-function i(
-  const { dy: u, x: n } =
+r(K);
+function i(t) {
+  const { dy: u, x: n } = t.inputs;
   return d(() => {
-    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5),
-    return u.mul(
+    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
+    return u.mul(p);
   });
 }
 const x = {
@@ -31,10 +32,10 @@ const x = {
   backendName: "cpu",
   kernelFunc: i
 };
-
+r(x);
 const h = {
   kernelName: "GeluGrad",
   backendName: "tensorflow",
   kernelFunc: i
 };
-
+r(h);
```
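The constants 0.7978845608028654 (√(2/π)) and 0.044715 identify this kernel as the standard tanh approximation of GELU; `r` here is presumably a re-exported tfjs `registerKernel`. A minimal sketch of the math being registered for the cpu and tensorflow backends (plain TypeScript, not the package's API):

```ts
// gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
const SQRT_2_OVER_PI = 0.7978845608028654;
const CUBIC_COEFF = 0.044715;

function geluApprox(x: number): number {
  const inner = SQRT_2_OVER_PI * (x + CUBIC_COEFF * x ** 3);
  return 0.5 * x * (1 + Math.tanh(inner));
}

console.log(geluApprox(1)); // ≈ 0.8412, close to exact GELU(1) ≈ 0.8413
```

The `GeluGrad` kernel in the same diff differentiates this same expression, which is why the tangent term `f = 1 - tanh²(...)` appears in its body.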
package/dist/ops/cpu/matMul16.d.ts
ADDED

```diff
@@ -0,0 +1 @@
+export {};
```
package/dist/ops/cpu/matMul16.js
ADDED

```diff
@@ -0,0 +1,16 @@
+import { isPackedTensor as t } from "../../utilities/packed.js";
+import "../../index-ZyQhjEPo.js";
+import { r as p } from "../../tensor_util-DV-FP5Q3.js";
+import { m } from "../../mat_mul-DeAh4uTH.js";
+function l(r) {
+  const { A: e, B: n } = r.inputs, { transposeA: o, transposeB: s } = r.attrs, a = !t(e), c = !t(n);
+  if (a && c)
+    return m(e, n, o, s);
+  throw new Error("MatMul16 CPU kernel only supports packed tensors currently.");
+}
+const u = {
+  kernelName: "MatMul16",
+  backendName: "cpu",
+  kernelFunc: l
+};
+p(u);
```
package/dist/ops/cpu/matMulGelu.js
CHANGED

```diff
@@ -1,10 +1,12 @@
-import {
-import { g as
+import { t as m } from "../../index-ZyQhjEPo.js";
+import { g as i, d as M } from "../../gelu-CNLFZWea.js";
+import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+import { m as k } from "../../mat_mul-DeAh4uTH.js";
 function c(t) {
-  const { inputs: u } = t, { x: n, kernel:
+  const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
   return m(() => {
-    const o = a
-    return
+    const o = k(a, l);
+    return i(o);
   });
 }
 const G = {
@@ -13,23 +15,23 @@ const G = {
   kernelFunc: c
 };
 e(G);
-const
+const f = {
   kernelName: "MatMulGelu",
   backendName: "tensorflow",
   kernelFunc: c
 };
-e(
-const
+e(f);
+const p = {
   kernelName: "MatMulGelu",
   backendName: "webgpu",
   kernelFunc: c
 };
-e(
+e(p);
 function s(t) {
-  const { dy: u, x: n, kernel:
+  const { dy: u, x: n, kernel: r } = t.inputs;
   return m(() => {
-    const a = n
-    return [o,
+    const a = k(n, r), l = M(u, a), o = l.matMul(r.transpose()), d = n.transpose().matMul(l);
+    return [o, d];
   });
 }
 const g = {
@@ -38,15 +40,15 @@ const g = {
   kernelFunc: s
 };
 e(g);
-const
+const N = {
   kernelName: "MatMulGeluGrad",
   backendName: "tensorflow",
   kernelFunc: s
 };
-e(
-const
+e(N);
+const b = {
   kernelName: "MatMulGeluGrad",
   backendName: "webgpu",
   kernelFunc: s
 };
-e(
+e(b);
```
package/dist/ops/cpu/matMulMul.js
CHANGED

```diff
@@ -1,20 +1,21 @@
-import {
+import { t as M } from "../../index-ZyQhjEPo.js";
+import { r as e } from "../../tensor_util-DV-FP5Q3.js";
 function n(t) {
-  const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u,
-  return
+  const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, i = a, k = c;
+  return M(() => m.matMul(i, o, s).mul(k));
 }
-const
+const p = {
   kernelName: "MatMulMul",
   backendName: "cpu",
   kernelFunc: n
 };
-e(
-const
+e(p);
+const f = {
   kernelName: "MatMulMul",
   backendName: "tensorflow",
   kernelFunc: n
 };
-e(
+e(f);
 const g = {
   kernelName: "MatMulMul",
   backendName: "webgpu",
```
package/dist/ops/cpu/mulDropout.js
CHANGED

```diff
@@ -1,7 +1,8 @@
-import {
+import { m as u } from "../../index-ZyQhjEPo.js";
+import { r as e } from "../../tensor_util-DV-FP5Q3.js";
 function n(o) {
-  const { inputs: r } = o, { a: l, b:
-  return console.warn("Using fallback mulDrop implementation without dropout."),
+  const { inputs: r } = o, { a: l, b: t } = r;
+  return console.warn("Using fallback mulDrop implementation without dropout."), u(l, t);
 }
 const a = {
   kernelName: "MulDropout",
```
package/dist/ops/cpu/normRMS.js
CHANGED

```diff
@@ -1,9 +1,10 @@
-import {
+import { t as d } from "../../index-ZyQhjEPo.js";
+import { r as a } from "../../tensor_util-DV-FP5Q3.js";
 function i(t) {
-  const { inputs: e } = t, { x: n, gamma: s } = e, r = n,
+  const { inputs: e } = t, { x: n, gamma: s } = e, r = n, m = s;
   return d(() => {
     const u = r.square().mean(-1, !0).add(1e-8).rsqrt();
-    return r.mul(u).mul(
+    return r.mul(u).mul(m);
   });
 }
 const k = {
@@ -11,18 +12,18 @@ const k = {
   backendName: "cpu",
   kernelFunc: i
 };
-
+a(k);
 const g = {
   kernelName: "RMSNorm",
   backendName: "tensorflow",
   kernelFunc: i
 };
-
+a(g);
 function N(t) {
   const { dy: e, x: n, gamma: s } = t.inputs;
   return d(() => {
-    const r = n.shape[n.shape.length - 1],
-    return [c.mul(
+    const r = n.shape[n.shape.length - 1], m = n.square().mean(-1, !0), o = m.add(1e-8).rsqrt(), u = n.mul(o), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
+    return [c.mul(o).sub(n.mul(f).mul(o).div(m.add(1e-8))), l];
   });
 }
 const S = {
@@ -30,10 +31,10 @@ const S = {
   backendName: "cpu",
   kernelFunc: N
 };
-
-const
+a(S);
+const p = {
   kernelName: "RMSNormGrad",
   backendName: "tensorflow",
   kernelFunc: N
 };
-
+a(p);
```
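The forward kernel normalises by the root-mean-square over the last axis, with ε = 1e-8 added before the reciprocal square root, then scales by `gamma`. A minimal sketch of the same computation on a plain array (illustrative, not the package's API):

```ts
// RMSNorm: y[i] = x[i] / sqrt(mean(x²) + eps) * gamma[i]
function rmsNorm(x: number[], gamma: number[], eps = 1e-8): number[] {
  const meanSq = x.reduce((acc, v) => acc + v * v, 0) / x.length;
  const invRms = 1 / Math.sqrt(meanSq + eps);
  return x.map((v, i) => v * invRms * gamma[i]);
}

console.log(rmsNorm([1, 2, 3, 4], [1, 1, 1, 1]));
// each value divided by sqrt((1+4+9+16)/4) ≈ 2.7386
```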
package/dist/ops/cpu/qkv.js
CHANGED

```diff
@@ -1,25 +1,29 @@
-import
-import { r as
-import {
+import "../../index-ZyQhjEPo.js";
+import { r as q } from "../../tensor_util-DV-FP5Q3.js";
+import { r as o } from "../../reshape-DevtBWtf.js";
+import { s as x } from "../../split-DrfihRpZ.js";
 function v(p) {
-  const { x: c, kernel: K } = p.inputs, { heads: n } = p.attrs
+  const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
+  if (C)
+    throw new Error("QKV CPU implementation does not support packed tensors.");
+  const [s, e, t] = c.shape, a = o(c, [s * e, t]), i = a.dot(K);
   a.dispose();
   const d = o(i, [s, e, 3 * t]);
   i.dispose();
-  const [k,
+  const [k, m, l] = x(d, 3, -1);
   d.dispose();
   const r = t / n, f = o(k, [s, e, n, r]);
   k.dispose();
-  const
+  const w = f.transpose([0, 2, 1, 3]);
   f.dispose();
-  const h = o(
-
+  const h = o(m, [s, e, n, r]);
+  m.dispose();
   const N = h.transpose([0, 2, 1, 3]);
   h.dispose();
-  const u = o(
-
+  const u = o(l, [s, e, n, r]);
+  l.dispose();
   const T = u.transpose([0, 2, 1, 3]);
-  return u.dispose(), [
+  return u.dispose(), [w, N, T];
 }
 const F = {
   kernelName: "QKV",
@@ -27,12 +31,12 @@ const F = {
   kernelFunc: v
 };
 q(F);
-const
+const Q = {
   kernelName: "QKV",
   backendName: "tensorflow",
   kernelFunc: v
 };
-q(
+q(Q);
 export {
   v as qkvCPU
 };
```
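The QKV kernel fuses the input projection with the head split. Reading the shapes off the code above, with input x of shape [B, T, C] and a fused [C, 3C] kernel (names here are illustrative, not the package's API):

```ts
// Shape walkthrough for the QKV kernel; heads must divide C.
function qkvShapes(B: number, T: number, C: number, heads: number) {
  const headDim = C / heads;
  return {
    flattened: [B * T, C],            // o(c, [s * e, t])
    projected: [B, T, 3 * C],         // o(i, [s, e, 3 * t]) after a.dot(K)
    split: [B, T, C],                 // each of q, k, v after x(d, 3, -1)
    perHead: [B, T, heads, headDim],  // o(k, [s, e, n, r])
    output: [B, heads, T, headDim]    // after transpose([0, 2, 1, 3])
  };
}

console.log(qkvShapes(2, 64, 128, 8)); // output: [2, 8, 64, 16]
```

The intermediate `dispose()` calls after each reshape/transpose keep only one temporary alive at a time, which matters on the CPU backend where nothing is pooled.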
package/dist/ops/cpu/rope.js
CHANGED

```diff
@@ -1,37 +1,38 @@
-import
-import { r as
-import {
-import {
-import {
-
-
-
-
-
-
-
-
-
-
-
-
+import "../../index-ZyQhjEPo.js";
+import { r as I } from "../../tensor_util-DV-FP5Q3.js";
+import { r as y } from "../../range-BMS52eQi.js";
+import { g as F } from "../../gather-DykLGqmW.js";
+import { s as E } from "../../stack-yOIAalTq.js";
+import { c as T } from "../../concat-BHlIJeyT.js";
+function U(c, r, p, e, n) {
+  const t = e.shape[3], s = p;
+  if (s > t) return e;
+  const o = e.shape[2], i = s / 2, a = r.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), d = c.slice([n, 0, 0], [o, i, 1]).reshape([1, 1, o, i]), l = e.shape[0], m = e.shape[1], h = y(0, s, 2, "int32"), g = y(1, s, 2, "int32"), D = ((k) => {
+    const C = k.slice([0, 0, 0, 0], [l, m, o, s]), R = s < t ? k.slice([0, 0, 0, s], [l, m, o, t - s]) : null, u = F(C, h, 3), f = F(C, g, 3), v = u.mul(a), N = f.mul(d), S = v.sub(N), P = f.mul(a), b = u.mul(d), x = P.add(b);
+    u.dispose(), f.dispose(), a.dispose(), d.dispose(), v.dispose(), N.dispose(), P.dispose(), b.dispose();
+    const K = E([S, x], -1);
+    S.dispose(), x.dispose();
+    const w = K.reshape([l, m, o, s]);
+    return K.dispose(), R ? T([w, R], 3) : w;
+  })(e);
+  return h.dispose(), g.dispose(), D;
 }
-function B(
-  const { x: c,
-  return U(
+function B(c) {
+  const { x: r } = c.inputs, { pastLen: p, negSin: e, ropeCache: n } = c.attrs, t = r.shape[3], s = e ? n.getNegSin() : n.getSin(), o = n.getCos();
+  return U(s, o, t, r, p);
}
 const j = {
   kernelName: "Rope",
   backendName: "cpu",
   kernelFunc: B
 };
-
+I(j);
 const z = {
   kernelName: "Rope",
   backendName: "tensorflow",
   kernelFunc: B
 };
-
+I(z);
 export {
   U as applyRoPE,
   B as ropeCPU
```
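The rewritten applyRoPE gathers the even- and odd-strided channels, rotates each (even, odd) pair by the cached cos/sin values, and re-interleaves them via stack-then-reshape. A minimal sketch of the per-pair rotation, assuming the standard RoPE convention that the code appears to follow (S = even·cos − odd·sin, x = odd·cos + even·sin in the diff above):

```ts
// Rotate one RoPE channel pair by angle theta (plain numbers; the real
// kernel uses cached cos/sin tensors sliced from the RoPECache).
function ropeRotate(pair: [number, number], theta: number): [number, number] {
  const [even, odd] = pair;
  const cos = Math.cos(theta), sin = Math.sin(theta);
  return [even * cos - odd * sin, odd * cos + even * sin];
}

console.log(ropeRotate([1, 0], Math.PI / 2)); // ≈ [0, 1]: a pure rotation
```

The early return when the rotary dimension exceeds the head dimension, and the concat of the untouched tail `R`, support partial-rotary configurations where only the first `s` channels are rotated.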
package/dist/ops/cpu/scatterSub.js
CHANGED

```diff
@@ -1,39 +1,25 @@
-import {
-import {
-import {
-import {
-import {
-
-
-
-
-
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
-function I(a, e, s) {
-  g(s);
-  const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
+import { A as f, B as c, E as g, c as l, m as N } from "../../index-ZyQhjEPo.js";
+import { j as b, r as S } from "../../tensor_util-DV-FP5Q3.js";
+import { d as h } from "../../tensor-DdQUJZlz.js";
+import { v as D } from "../../scatter_nd_util-5EL-8VAQ.js";
+import { r as k } from "../../range-BMS52eQi.js";
+import { s as v } from "../../stack-yOIAalTq.js";
+import { o as E } from "../../ones-CAMiP4I2.js";
+function I(r, e, s) {
+  h(s);
+  const n = c(r, "indices", "scatterND", "int32"), t = c(e, "updates", "scatterND");
   D(t, n, s);
-  const
-  return
+  const o = { indices: n, updates: t }, a = { shape: s };
+  return g.runKernel(b, o, a);
 }
 const K = /* @__PURE__ */ f({ scatterND_: I });
-function L(
-  const { logits: e, labels: s, dy: n } =
-  return
+function L(r) {
+  const { logits: e, labels: s, dy: n } = r.inputs, t = s.shape[0], o = e.shape[1], a = k(0, t, 1, "int32"), i = v([a, s], 1), d = E([t]), p = K(i, d, [t, o]), u = l(e, p), m = n.reshape([t, 1]);
+  return N(u, m);
 }
 const T = {
   kernelName: "EfficientScatterSub",
   backendName: "cpu",
   kernelFunc: L
 };
-
+S(T);
```
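EfficientScatterSub scatters ones at the `[row, label]` positions, subtracts that one-hot from the logits tensor, and scales each row by `dy` — the familiar sparse cross-entropy gradient when the input already holds softmax probabilities (an assumption about the caller; the kernel itself just subtracts and scales). A sketch of the per-row arithmetic, illustrative only:

```ts
// grad[i][j] = (probs[i][j] - onehot(labels[i])[j]) * dy[i]
function scatterSubGrad(probs: number[][], labels: number[], dy: number[]): number[][] {
  return probs.map((row, i) =>
    row.map((v, j) => (v - (labels[i] === j ? 1 : 0)) * dy[i])
  );
}

console.log(scatterSubGrad([[0.7, 0.2, 0.1]], [0], [1])); // [[-0.3, 0.2, 0.1]]
```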
package/dist/ops/dot16.js
ADDED

```diff
@@ -0,0 +1,42 @@
+import { b as d } from "../matMul16--R5hOwDG.js";
+import { transpose16 as w } from "./transpose16.js";
+import { reshape16 as n } from "./reshape16.js";
+import { isPackedTensor as p } from "../utilities/packed.js";
+import { d as x } from "../tfjs_backend-kNyO5L2d.js";
+function E(e, s, h = !1, c = !1) {
+  if (!p(e) && !p(s))
+    return x(e, s);
+  if (e.rank < 2 || s.rank < 2)
+    throw new Error(
+      `dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${s.shape}`
+    );
+  if (s.rank >= 3) {
+    const r = e.shape.slice(-1)[0], i = s.shape.slice(-2)[0];
+    if (r !== i)
+      throw new Error(
+        `If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${s.shape}`
+      );
+  }
+  if (!p(e) || !p(s))
+    throw new Error("dot16 requires both inputs to be packed Tensors.");
+  if (e.rank === 2 && s.rank === 2)
+    return d(e, s, h, c);
+  {
+    const r = e.shape.slice(), i = r.pop();
+    e = n(e, [-1, i]);
+    const a = s.shape.slice(), l = a.pop(), m = a.pop(), k = [...a, l], f = Array.from({ length: s.rank }, (o, t) => t === 0 ? s.rank - 2 : t <= s.rank - 2 ? t - 1 : t);
+    if (f.every((o, t) => o === t))
+      s = n(s, [m, -1]);
+    else {
+      const o = w(s, f);
+      s = n(o, [m, -1]), o.dispose();
+    }
+    const y = [...r, ...k], u = d(e, s, h, c);
+    e.dispose(), s.dispose();
+    const D = n(u, y);
+    return u.dispose(), D;
+  }
+}
+export {
+  E as dot16
+};
```
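For a right operand of rank ≥ 3, dot16 moves the contraction axis to the front so the packed 2-D matmul kernel can be reused, then restores the output shape afterwards. The permutation it builds (logic transcribed from the code above, illustrative only):

```ts
// Axis permutation for a rank-n right operand: move the second-to-last
// axis to position 0, shift the leading axes right, keep the last axis.
function dot16Perm(rank: number): number[] {
  return Array.from({ length: rank }, (_, t) =>
    t === 0 ? rank - 2 : t <= rank - 2 ? t - 1 : t
  );
}

console.log(dot16Perm(3)); // [1, 0, 2]
console.log(dot16Perm(4)); // [2, 0, 1, 3]
```

When the permutation is already the identity the transpose is skipped entirely, which avoids an extra packed-tensor copy on the hot path.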
package/dist/ops/gatherSub.js
CHANGED
package/dist/ops/gelu.js
CHANGED

```diff
@@ -1,7 +1,7 @@
-import "../index-
+import "../index-ZyQhjEPo.js";
 import "./cpu/gelu.js";
 import "./webgl/gelu.js";
-import { d as e, g as i } from "../gelu-
+import { d as e, g as i } from "../gelu-CNLFZWea.js";
 export {
   e as dGelu,
   i as gelu
```
package/dist/ops/grads/add16.d.ts
ADDED

```diff
@@ -0,0 +1 @@
+export {};
```
package/dist/ops/grads/add16.js
ADDED

```diff
@@ -0,0 +1,27 @@
+import { j as u, q as d } from "../../index-ZyQhjEPo.js";
+import { sum16 as p } from "../sum16.js";
+import { reshape16 as c } from "../reshape16.js";
+import { a as h } from "../../tensor_util-DV-FP5Q3.js";
+const m = {
+  kernelName: "Add16",
+  inputsToSave: ["a", "b"],
+  gradFunc: (s, i) => {
+    const [t, a] = i, n = u(t.shape, a.shape);
+    if (Array.isArray(s))
+      throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
+    return { a: () => {
+      let e = s;
+      const r = d(t.shape, n);
+      r.length > 0 && (e = p(e, r));
+      const o = c(e, t.shape);
+      return e.dispose(), o;
+    }, b: () => {
+      let e = s;
+      const r = d(a.shape, n);
+      r.length > 0 && (e = p(e, r));
+      const o = c(e, a.shape);
+      return e.dispose(), o;
+    } };
+  }
+};
+h(m);
```
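The Add16 gradient follows the usual broadcast rule: sum `dy` over any axis an input was broadcast along, then reshape back to that input's shape. A sketch of how those reduction axes are found, assuming tfjs-style broadcast semantics (the package resolves them via the imported `j`/`q` helpers; this standalone version is illustrative):

```ts
// Axes of outShape where inShape contributed a broadcast (size-1 or
// missing) dimension; dy must be summed over these before reshaping.
function reductionAxes(inShape: number[], outShape: number[]): number[] {
  const offset = outShape.length - inShape.length;
  const axes: number[] = [];
  for (let i = 0; i < outShape.length; i++) {
    const inDim = i < offset ? 1 : inShape[i - offset];
    if (inDim === 1 && outShape[i] > 1) axes.push(i);
  }
  return axes;
}

console.log(reductionAxes([1, 4], [3, 4])); // [0]: sum dy over axis 0, reshape to [1, 4]
```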
package/dist/ops/grads/attentionMask.js
CHANGED

```diff
@@ -1,29 +1,22 @@
-import
-import {
-
+import "../../index-ZyQhjEPo.js";
+import { m as o } from "../../matMul16--R5hOwDG.js";
+import { transpose16 as m } from "../transpose16.js";
+import { a as c } from "../../tensor_util-DV-FP5Q3.js";
+const l = {
   kernelName: "AttentionMask",
   inputsToSave: ["q", "k"],
   outputsToSave: [],
-  gradFunc: (
-    if (Array.isArray(
+  gradFunc: (r, s, n) => {
+    if (Array.isArray(r))
       throw new Error("Expected dy to be a single Tensor");
-    const [
+    const [a, i] = s, { divisor: e } = n;
     return {
-      q: () =>
+      q: () => o(r, i, e),
       k: () => {
-        const
-
-        const o = r.transpose([0, 1, 3, 2]);
-        return r.dispose(), o;
-      },
-      mask: () => t,
-      divisor: () => {
-        const s = e.matMul(n, !1, !0), r = t.mul(s);
-        s.dispose();
-        const o = r.sum();
-        return r.dispose(), o;
+        const t = o(a, r, e, !0, !1), u = m(t, [0, 1, 3, 2]);
+        return t.dispose(), u;
       }
     };
   }
 };
-
+c(l);
```
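The rewritten gradient routes both terms through matMul16: the q-gradient is (dy · k) / divisor and the k-gradient is ((qᵀ · dy) / divisor)ᵀ, matching the two calls above if matMul16's third argument is the divisor (an assumption from the attrs name). A plain 2-D sketch of that math, ignoring the causal-mask term and the packed-tensor machinery:

```ts
// Gradients of scores = (q · kᵀ) / divisor, on plain number[][] matrices.
function matmul(a: number[][], b: number[][]): number[][] {
  return a.map(row => b[0].map((_, j) => row.reduce((s, v, k) => s + v * b[k][j], 0)));
}
const transpose = (m: number[][]) => m[0].map((_, j) => m.map(row => row[j]));

function attentionMaskGrads(dy: number[][], q: number[][], k: number[][], divisor: number) {
  const scale = (m: number[][]) => m.map(r => r.map(v => v / divisor));
  const dq = scale(matmul(dy, k));                       // dq = (dy · k) / divisor
  const dk = scale(transpose(matmul(transpose(q), dy))); // dk = ((qᵀ · dy) / divisor)ᵀ
  return { dq, dk };
}

console.log(attentionMaskGrads([[1, 0], [0, 1]], [[1, 2], [3, 4]], [[5, 6], [7, 8]], 2));
```

Note the old code also produced `mask` and `divisor` gradients; the new version saves only q and k, so those inputs are now treated as non-differentiable.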
package/dist/ops/grads/gelu.js
CHANGED

```diff
@@ -1,5 +1,6 @@
-import "../../index-
-import { a as
+import "../../index-ZyQhjEPo.js";
+import { a as m } from "../../gelu-CNLFZWea.js";
+import "../../tensor_util-DV-FP5Q3.js";
 export {
-
+  m as geluGradConfig
 };
```
package/dist/ops/grads/matMul16.js
ADDED

```diff
@@ -0,0 +1,9 @@
+import "../../index-ZyQhjEPo.js";
+import { a as f } from "../../matMul16--R5hOwDG.js";
+import "../../gelu-CNLFZWea.js";
+import "../transpose16.js";
+import "../reshape16.js";
+import "../../tensor_util-DV-FP5Q3.js";
+export {
+  f as matMul16GradConfig
+};
```
package/dist/ops/grads/matMulGelu.js
CHANGED

```diff
@@ -1,17 +1,18 @@
-import {
-
-
+import { e as l } from "../../index-ZyQhjEPo.js";
+import { a as o } from "../../tensor_util-DV-FP5Q3.js";
+function i(e, r, n) {
+  return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
 }
 const s = {
   kernelName: "MatMulGelu",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
-  gradFunc: (e,
-    const [
+  gradFunc: (e, r) => {
+    const [n, t] = r, [u, a] = i(e, n, t);
     return {
       x: () => u,
-      kernel: () =>
+      kernel: () => a
     };
   }
 };
-
+o(s);
```
package/dist/ops/grads/normRMS.js
CHANGED

```diff
@@ -1,20 +1,21 @@
-import {
-
-
+import { e as t } from "../../index-ZyQhjEPo.js";
+import { a as g } from "../../tensor_util-DV-FP5Q3.js";
+function i(r, a, m) {
+  return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
 }
 const s = {
   kernelName: "RMSNorm",
   inputsToSave: ["x", "gamma"],
   outputsToSave: [],
   gradFunc: (r, a) => {
-    const [
+    const [m, n] = a, [o, e] = i(r, m, n);
     return {
-      x: () =>
-      gamma: () =>
+      x: () => o,
+      gamma: () => e
     };
   }
 };
-
+g(s);
 export {
   s as normRMSGradConfig
 };
```
package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts}
RENAMED

```diff
@@ -1,2 +1,2 @@
 import { GradConfig } from '@tensorflow/tfjs-core';
-export declare const
+export declare const packGradConfig: GradConfig;
```