@genai-fi/nanogpt 0.9.0 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +352 -14
- package/dist/Generator.js +69 -78
- package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
- package/dist/Reshape-CF6odzV4.js +16 -0
- package/dist/Reshape-_kILl6tK.js +81 -0
- package/dist/TeachableLLM.js +28 -22
- package/dist/Trainer.d.ts +2 -0
- package/dist/Trainer.js +3 -2
- package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
- package/dist/backend.d.ts +2 -1
- package/dist/backend.js +10 -4
- package/dist/backend_util-D-rUb2ty.js +474 -0
- package/dist/backend_webgpu-B0u2ndUn.js +547 -0
- package/dist/binary_op_util-pKXltfxI.js +192 -0
- package/dist/broadcast_to-CwF7XIeu.js +30 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/check.d.ts +1 -1
- package/dist/checks/check.js +8 -8
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/index.d.ts +2 -0
- package/dist/checks/index.js +7 -5
- package/dist/checks/matMulGelu.js +6 -6
- package/dist/checks/normRMS.js +7 -7
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.d.ts +1 -0
- package/dist/checks/packUnpack.js +18 -0
- package/dist/checks/qkv.js +12 -27
- package/dist/checks/rope.js +2 -2
- package/dist/checks/weights.js +18 -16
- package/dist/complex-CSlYz-2T.js +13 -0
- package/dist/complex_util-Yc1A_gV1.js +55 -0
- package/dist/concat-BHlIJeyT.js +19 -0
- package/dist/concat_util-DcJk7YHS.js +22 -0
- package/dist/data/docx.js +1 -1
- package/dist/data/parquet.js +2 -2
- package/dist/data/pdf.js +1 -1
- package/dist/data/textLoader.js +1 -1
- package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
- package/dist/dropout-C1pM3f11.js +99 -0
- package/dist/expand_dims-BPG4fwBP.js +13 -0
- package/dist/exports_initializers-xuidcwI4.js +7 -0
- package/dist/gather-DykLGqmW.js +10 -0
- package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
- package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
- package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
- package/dist/index-CjOj7j-u.js +7308 -0
- package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
- package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
- package/dist/index-ZyQhjEPo.js +2157 -0
- package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
- package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
- package/dist/layers/BaseLayer.d.ts +1 -0
- package/dist/layers/BaseLayer.js +7 -6
- package/dist/layers/CausalSelfAttention.d.ts +0 -1
- package/dist/layers/CausalSelfAttention.js +56 -55
- package/dist/layers/MLP.js +15 -16
- package/dist/layers/PositionEmbedding.js +5 -14
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.d.ts +2 -0
- package/dist/layers/RoPECache.js +22 -17
- package/dist/layers/TiedEmbedding.js +22 -17
- package/dist/layers/TransformerBlock.js +21 -20
- package/dist/loader/load.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +39 -33
- package/dist/loader/save.js +1 -1
- package/dist/log_sum_exp-DWI-76TI.js +41 -0
- package/dist/main.d.ts +8 -0
- package/dist/main.js +63 -52
- package/dist/matMul16--R5hOwDG.js +77 -0
- package/dist/mat_mul-DeAh4uTH.js +12 -0
- package/dist/mod-Gt1rMB4n.js +12 -0
- package/dist/models/NanoGPTV1.js +40 -31
- package/dist/models/model.d.ts +2 -0
- package/dist/models/model.js +37 -29
- package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
- package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
- package/dist/ones-CAMiP4I2.js +15 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.d.ts +1 -1
- package/dist/ops/adamMoments.js +4 -4
- package/dist/ops/add16.d.ts +2 -0
- package/dist/ops/add16.js +9 -0
- package/dist/ops/appendCache.js +16 -9
- package/dist/ops/attentionMask.js +4 -4
- package/dist/ops/concat16.d.ts +2 -0
- package/dist/ops/concat16.js +9 -0
- package/dist/ops/cpu/adamAdjust.js +14 -13
- package/dist/ops/cpu/adamMoments.js +10 -9
- package/dist/ops/cpu/appendCache.js +9 -8
- package/dist/ops/cpu/attentionMask.js +15 -14
- package/dist/ops/cpu/fusedSoftmax.js +13 -12
- package/dist/ops/cpu/gatherSub.js +9 -24
- package/dist/ops/cpu/gelu.js +13 -12
- package/dist/ops/cpu/matMul16.d.ts +1 -0
- package/dist/ops/cpu/matMul16.js +16 -0
- package/dist/ops/cpu/matMulGelu.js +18 -16
- package/dist/ops/cpu/matMulMul.js +8 -7
- package/dist/ops/cpu/mulDropout.js +4 -3
- package/dist/ops/cpu/normRMS.js +11 -10
- package/dist/ops/cpu/qkv.js +17 -13
- package/dist/ops/cpu/rope.js +23 -22
- package/dist/ops/cpu/scatterSub.js +16 -30
- package/dist/ops/dot16.d.ts +2 -0
- package/dist/ops/dot16.js +42 -0
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.d.ts +1 -0
- package/dist/ops/grads/add16.js +27 -0
- package/dist/ops/grads/attentionMask.js +12 -19
- package/dist/ops/grads/gelu.js +4 -3
- package/dist/ops/grads/matMul16.d.ts +2 -0
- package/dist/ops/grads/matMul16.js +9 -0
- package/dist/ops/grads/matMulGelu.js +8 -7
- package/dist/ops/grads/normRMS.js +8 -7
- package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
- package/dist/ops/grads/pack16.js +7 -0
- package/dist/ops/grads/qkv.d.ts +3 -1
- package/dist/ops/grads/qkv.js +28 -22
- package/dist/ops/grads/rope.d.ts +2 -1
- package/dist/ops/grads/rope.js +6 -13
- package/dist/ops/grads/softmax16.d.ts +2 -0
- package/dist/ops/grads/softmax16.js +26 -0
- package/dist/ops/grads/unpack16.d.ts +2 -0
- package/dist/ops/grads/unpack16.js +6 -0
- package/dist/ops/grads/utils.d.ts +3 -0
- package/dist/ops/grads/utils.js +10 -0
- package/dist/ops/matMul16.d.ts +15 -0
- package/dist/ops/matMul16.js +13 -0
- package/dist/ops/matMulGelu.js +1 -1
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.d.ts +2 -0
- package/dist/ops/mul16.js +8 -0
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.d.ts +2 -0
- package/dist/ops/pack16.js +6 -0
- package/dist/ops/qkv.d.ts +1 -1
- package/dist/ops/qkv.js +8 -4
- package/dist/ops/reshape16.d.ts +2 -0
- package/dist/ops/reshape16.js +43 -0
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +8 -10
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.d.ts +2 -0
- package/dist/ops/slice16.js +9 -0
- package/dist/ops/softmax16.d.ts +2 -0
- package/dist/ops/softmax16.js +12 -0
- package/dist/ops/sub16.d.ts +2 -0
- package/dist/ops/sub16.js +8 -0
- package/dist/ops/sum16.d.ts +2 -0
- package/dist/ops/sum16.js +13 -0
- package/dist/ops/transpose16.d.ts +3 -0
- package/dist/ops/transpose16.js +41 -0
- package/dist/ops/unpack16.d.ts +2 -0
- package/dist/ops/unpack16.js +6 -0
- package/dist/ops/webgl/adamAdjust.js +3 -2
- package/dist/ops/webgl/adamMoments.js +2 -1
- package/dist/ops/webgl/appendCache.js +2 -1
- package/dist/ops/webgl/attentionMask.js +5 -4
- package/dist/ops/webgl/fusedSoftmax.js +6 -4
- package/dist/ops/webgl/gatherSub.js +7 -6
- package/dist/ops/webgl/gelu.js +3 -2
- package/dist/ops/webgl/log.js +12 -27
- package/dist/ops/webgl/matMul16.d.ts +1 -0
- package/dist/ops/webgl/matMul16.js +37 -0
- package/dist/ops/webgl/matMulGelu.js +17 -15
- package/dist/ops/webgl/matMulMul.js +13 -12
- package/dist/ops/webgl/mulDropout.js +9 -8
- package/dist/ops/webgl/normRMS.js +8 -7
- package/dist/ops/webgl/qkv.js +6 -5
- package/dist/ops/webgl/rope.js +11 -10
- package/dist/ops/webgl/scatterSub.js +6 -5
- package/dist/ops/webgpu/adamAdjust.js +12 -10
- package/dist/ops/webgpu/adamMoments.js +27 -22
- package/dist/ops/webgpu/add16.d.ts +1 -0
- package/dist/ops/webgpu/add16.js +14 -0
- package/dist/ops/webgpu/appendCache.js +64 -17
- package/dist/ops/webgpu/attentionMask.js +19 -62
- package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
- package/dist/ops/webgpu/attentionMask32_program.js +54 -0
- package/dist/ops/webgpu/concat16.d.ts +19 -0
- package/dist/ops/webgpu/concat16.js +128 -0
- package/dist/ops/webgpu/gatherSub.js +9 -7
- package/dist/ops/webgpu/gelu.js +78 -31
- package/dist/ops/webgpu/index.js +12 -0
- package/dist/ops/webgpu/matMul16.d.ts +1 -0
- package/dist/ops/webgpu/matMul16.js +58 -0
- package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
- package/dist/ops/webgpu/matMul16_program.js +336 -0
- package/dist/ops/webgpu/mul16.d.ts +1 -0
- package/dist/ops/webgpu/mul16.js +14 -0
- package/dist/ops/webgpu/normRMS.js +21 -40
- package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS16_program.js +24 -0
- package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS32_program.js +24 -0
- package/dist/ops/webgpu/normRMSGrad.js +113 -64
- package/dist/ops/webgpu/pack16.d.ts +1 -0
- package/dist/ops/webgpu/pack16.js +19 -0
- package/dist/ops/webgpu/pack16_program.d.ts +19 -0
- package/dist/ops/webgpu/pack16_program.js +92 -0
- package/dist/ops/webgpu/qkv.js +20 -55
- package/dist/ops/webgpu/rope.js +77 -22
- package/dist/ops/webgpu/scatterSub.js +9 -7
- package/dist/ops/webgpu/slice16.d.ts +7 -0
- package/dist/ops/webgpu/slice16.js +71 -0
- package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
- package/dist/ops/webgpu/softmax16.js +23 -0
- package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
- package/dist/ops/webgpu/softmax16_program.js +73 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
- package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
- package/dist/ops/webgpu/softmax16grad.js +38 -0
- package/dist/ops/webgpu/sub16.d.ts +1 -0
- package/dist/ops/webgpu/sub16.js +14 -0
- package/dist/ops/webgpu/sum16.d.ts +1 -0
- package/dist/ops/webgpu/sum16.js +40 -0
- package/dist/ops/webgpu/transpose16.d.ts +1 -0
- package/dist/ops/webgpu/transpose16.js +35 -0
- package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
- package/dist/ops/webgpu/transpose16_program.js +50 -0
- package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
- package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
- package/dist/ops/webgpu/unpack16.d.ts +1 -0
- package/dist/ops/webgpu/unpack16.js +49 -0
- package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
- package/dist/ops/webgpu/utils/binary_op.js +79 -0
- package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
- package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
- package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
- package/dist/ops/webgpu/utils/reductions.js +236 -45
- package/dist/ops-CNI3TwqM.js +645 -0
- package/dist/pack16-CFUqumar.js +41 -0
- package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
- package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
- package/dist/patches/PackedTensor.d.ts +12 -0
- package/dist/patches/PackedTensor.js +11 -0
- package/dist/patches/engine.d.ts +261 -0
- package/dist/patches/engine.js +10 -0
- package/dist/patches/tape.d.ts +12 -0
- package/dist/patches/tape.js +5 -0
- package/dist/patches/webgpu_backend.d.ts +18 -0
- package/dist/patches/webgpu_backend.js +57 -0
- package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
- package/dist/patches/webgpu_base.js +34 -0
- package/dist/patches/webgpu_program.d.ts +36 -0
- package/dist/patches/webgpu_program.js +401 -0
- package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
- package/dist/random_width-DY6Kk2Dl.js +10051 -0
- package/dist/range-BMS52eQi.js +11 -0
- package/dist/reciprocal-CTmshQ9J.js +10 -0
- package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
- package/dist/relu-yZ2-7WxU.js +10 -0
- package/dist/reshape-DevtBWtf.js +10 -0
- package/dist/rope-B5UUMsPi.js +32 -0
- package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
- package/dist/selu_util-D1w6yyTO.js +303 -0
- package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
- package/dist/shared-BuAXb4CI.js +2145 -0
- package/dist/sin-BGfy2HZo.js +16 -0
- package/dist/slice-D_gkkqZK.js +13 -0
- package/dist/slice_util-DtEldBfK.js +261 -0
- package/dist/softmax-ZHVebtR1.js +13 -0
- package/dist/split-DrfihRpZ.js +10 -0
- package/dist/squeeze-DZEpeblb.js +11 -0
- package/dist/stack-yOIAalTq.js +13 -0
- package/dist/sum-_fzj5ZTB.js +12 -0
- package/dist/tensor-DdQUJZlz.js +909 -0
- package/dist/tensor-f35l8Odg.js +8 -0
- package/dist/tensor1d-CeZuc-Rv.js +12 -0
- package/dist/tensor2d-G4Ys2GxX.js +15 -0
- package/dist/tensor4d-B8roDgtc.js +15 -0
- package/dist/tensor_util-DV-FP5Q3.js +523 -0
- package/dist/tfjs_backend-kNyO5L2d.js +653 -0
- package/dist/tile-BzyEiF-F.js +13 -0
- package/dist/tokeniser/CharTokeniser.js +1 -1
- package/dist/tokeniser/bpe.js +1 -1
- package/dist/training/Adam.d.ts +2 -1
- package/dist/training/Adam.js +12 -28
- package/dist/training/AdamExt.d.ts +1 -0
- package/dist/training/AdamExt.js +2 -2
- package/dist/training/DatasetBuilder.js +3 -20
- package/dist/training/FullTrainer.js +82 -64
- package/dist/training/Trainer.d.ts +11 -6
- package/dist/training/Trainer.js +51 -39
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/transpose-DKELTqhe.js +38 -0
- package/dist/utilities/arrayClose.js +7 -7
- package/dist/utilities/dummy.js +35 -27
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +7 -0
- package/dist/utilities/packed.js +716 -0
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +5 -0
- package/dist/utilities/sentences.js +41 -0
- package/dist/utilities/weights.js +2 -2
- package/dist/variable-Bhn5bHYv.js +7 -0
- package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
- package/dist/webgpu_util-BBCnKm2X.js +65 -0
- package/dist/zeros-2gldETuK.js +14 -0
- package/package.json +4 -3
- package/dist/Reshape-Bowtk9BP.js +0 -127
- package/dist/Reshape-DUqYftGC.js +0 -30
- package/dist/backend_util-CJIiDoV1.js +0 -749
- package/dist/broadcast_to-DzlNweb8.js +0 -44
- package/dist/concat-B912vBbo.js +0 -33
- package/dist/dropout-C-csYCLj.js +0 -193
- package/dist/exports_initializers-B8iZMgQ0.js +0 -16
- package/dist/gather-Dnpgw-YQ.js +0 -25
- package/dist/index-BzFyqcy-.js +0 -4457
- package/dist/index-C1rx_Ajs.js +0 -12076
- package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
- package/dist/log_sum_exp-DO6z8tSE.js +0 -103
- package/dist/mat_mul-DzjTFx-u.js +0 -27
- package/dist/mod-Dobti4j4.js +0 -27
- package/dist/ones-tIJeHlq-.js +0 -29
- package/dist/ops/fusedSoftmax.d.ts +0 -2
- package/dist/ops/fusedSoftmax.js +0 -10
- package/dist/ops/grads/fusedSoftmax.js +0 -22
- package/dist/ops-LuCMAnmM.js +0 -1525
- package/dist/random_width-CXVRloNK.js +0 -13670
- package/dist/range-CWcz7xFA.js +0 -26
- package/dist/reciprocal-C4rNcM-S.js +0 -25
- package/dist/relu-BjCh_SYb.js +0 -25
- package/dist/reshape-CnIwVG1c.js +0 -25
- package/dist/selu_util-OtRzVwW5.js +0 -719
- package/dist/shared-DmRsFyaJ.js +0 -3134
- package/dist/sin-gpDNRxE0.js +0 -47
- package/dist/slice-d0Vo9XTN.js +0 -28
- package/dist/softmax-D7Jj3p_P.js +0 -28
- package/dist/split-DK2k5eHf.js +0 -25
- package/dist/stack-DFatutCx.js +0 -27
- package/dist/sum-CJ0ULhmt.js +0 -27
- package/dist/tensor1d-vML0r3q6.js +0 -27
- package/dist/tensor2d-D76QGjF3.js +0 -30
- package/dist/tensor4d-Df1WlVDY.js +0 -30
- package/dist/webgpu_util-pLEV9tks.js +0 -80
- package/dist/zeros-Bj5rMYA7.js +0 -52
package/dist/ops/grads/qkv.d.ts
CHANGED
package/dist/ops/grads/qkv.js
CHANGED
@@ -1,30 +1,36 @@
-import …
-…
+import "../../index-ZyQhjEPo.js";
+import { a as u } from "../../matMul16--R5hOwDG.js";
+import { concat16 as f } from "../concat16.js";
+import { sum16 as g } from "../sum16.js";
+import { packTensor as k, isPackedTensor as l } from "../../utilities/packed.js";
+import { a as h } from "../../tensor_util-DV-FP5Q3.js";
+import { s as G } from "../../squeeze-DZEpeblb.js";
+const m = {
   kernelName: "QKV",
   inputsToSave: ["x", "kernel"],
   outputsToSave: [],
-  gradFunc: (…
-    const […
-…
-…
-…
-…
-…
-…
-…
-…
-    c.dispose();
-    const v = a.add(m).reshape([t, n, e]);
-    return a.dispose(), m.dispose(), v;
-  },
+  gradFunc: (e, s) => {
+    const [o, n, t] = e, [a] = s, p = f([o, n, t], 1);
+    o.dispose(), n.dispose(), t.dispose();
+    const c = [a.shape[0], a.shape[1], 3 * a.shape[2]], i = u.gradFunc(p, s, {
+      transposeA: !1,
+      transposeB: !1,
+      originalShape: c,
+      perm: [0, 2, 1, 3]
+    });
+    return p.dispose(), {
+      x: () => i.A(),
       kernel: () => {
-        const …
-…
-        const a = s.matMul(k, !0, !1), c = r.concat(a, 1);
-        return r.dispose(), a.dispose(), s.dispose(), c;
+        const r = i.B(), d = r.shape[0] === 1 ? G(r, [0]) : g(r, 0);
+        return r.dispose(), l(r) ? k(d) : d;
       }
     };
   }
 };
-…
+function A(e, s, o) {
+  return m.gradFunc(e, [s, o], {});
+}
+h(m);
+export {
+  A as qkvGrad
+};
package/dist/ops/grads/rope.d.ts
CHANGED
@@ -1 +1,2 @@
-…
+import { GradConfig } from '@tensorflow/tfjs-core';
+export declare const ropeGradConfig: GradConfig;
package/dist/ops/grads/rope.js
CHANGED
@@ -1,14 +1,7 @@
-import …
-…
-…
-…
-…
-…
-  inputsToSave: ["sin", "cos"],
-  outputsToSave: [],
-  gradFunc: (n, e) => {
-    const [s, o] = e, t = s.neg(), r = p(n, t, o, 0);
-    return t.dispose(), { x: () => r };
-  }
+import "../../utilities/packed.js";
+import "../../index-ZyQhjEPo.js";
+import { a as t } from "../../rope-B5UUMsPi.js";
+import "../../tensor_util-DV-FP5Q3.js";
+export {
+  t as ropeGradConfig
 };
-i(c);
package/dist/ops/grads/softmax16.js
ADDED
@@ -0,0 +1,26 @@
+import { e as n } from "../../index-ZyQhjEPo.js";
+import { isPackedTensor as t } from "../../utilities/packed.js";
+import { a } from "../../tensor_util-DV-FP5Q3.js";
+function s(r, e) {
+  return n().runKernel("Softmax16Grad", { dy: r, softmaxOutput: e });
+}
+const i = {
+  kernelName: "Softmax16",
+  outputsToSave: [!0],
+  gradFunc: (r, e) => {
+    const [o] = e;
+    if (Array.isArray(r))
+      throw new Error("Expected dy to be a single Tensor");
+    if (!t(o))
+      throw console.error(o), new Error("Softmax16 gradient requires packed y Tensor");
+    if (!t(r))
+      throw new Error("Softmax16 gradient requires packed dy Tensor");
+    return {
+      logits: () => s(r, o)
+    };
+  }
+};
+a(i);
+export {
+  i as softmax16GradConfig
+};
package/dist/ops/matMul16.d.ts
ADDED
@@ -0,0 +1,15 @@
+import { Tensor } from '@tensorflow/tfjs-core';
+export declare function matMul16(A: Tensor, B: Tensor, transposeA?: boolean, transposeB?: boolean, attrs?: {
+    scale?: number;
+    scaleA?: number;
+    scaleB?: number;
+    activation?: 'gelu';
+    forceOutputShape?: number[];
+    perm?: number[];
+    causalMask?: boolean;
+    pastLen?: number;
+}): Tensor;
+export declare function matMul16Scaled(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+export declare function matMul16ScaleA(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+export declare function matMul16ScaleB(A: Tensor, B: Tensor, scale: number, transposeA?: boolean, transposeB?: boolean): Tensor;
+export declare function matMul16Gelu(A: Tensor, B: Tensor, transposeA?: boolean, transposeB?: boolean): Tensor;
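Note: the declarations above are the public surface of the new packed fp16 matmul. A minimal usage sketch follows; the import specifier and tensor shapes are illustrative assumptions, not taken from the package's documentation:

import * as tf from '@tensorflow/tfjs';
import { matMul16, matMul16Scaled } from '@genai-fi/nanogpt'; // assumed re-export

// Batched [2, 64, 32] x [2, 32, 48] -> [2, 64, 48]
const a = tf.randomNormal([2, 64, 32]);
const b = tf.randomNormal([2, 32, 48]);
const plain = matMul16(a, b);
// Scaled variant, e.g. attention-style 1/sqrt(d) scaling of the product:
const scaled = matMul16Scaled(a, b, 1 / Math.sqrt(32));
// Fused GELU epilogue via the attrs bag:
const fused = matMul16(a, b, false, false, { activation: 'gelu' });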
package/dist/ops/matMul16.js
ADDED
@@ -0,0 +1,13 @@
+import "../index-ZyQhjEPo.js";
+import { b as p, c as u, d as i, e as s, m as M } from "../matMul16--R5hOwDG.js";
+import "./webgl/matMul16.js";
+import "./cpu/matMul16.js";
+import "../utilities/packed.js";
+import "../pack16-CFUqumar.js";
+export {
+  p as matMul16,
+  u as matMul16Gelu,
+  i as matMul16ScaleA,
+  s as matMul16ScaleB,
+  M as matMul16Scaled
+};
package/dist/ops/matMulGelu.js
CHANGED
package/dist/ops/matMulMul.js
CHANGED
package/dist/ops/mulDrop.js
CHANGED
package/dist/ops/normRMS.js
CHANGED
package/dist/ops/qkv.d.ts
CHANGED
@@ -1,2 +1,2 @@
 import { Tensor } from '@tensorflow/tfjs-core';
-export declare function qkv(x: Tensor, kernel: Tensor, heads: number): Tensor[];
+export declare function qkv(x: Tensor, kernel: Tensor, heads: number, packed?: boolean): Tensor[];
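The new optional packed flag makes qkv return fp16-packed tensors; ops/qkv.js below shows it calling packTensor on each output when the flag is set. A hedged sketch with hypothetical shapes (8 heads and a fused [embd, 3 * embd] projection kernel assumed):

import * as tf from '@tensorflow/tfjs';
import { qkv } from '@genai-fi/nanogpt'; // assumed re-export

const x = tf.randomNormal([1, 128, 256]);        // [batch, seq, embd]
const kernel = tf.randomNormal([256, 3 * 256]);  // fused Q/K/V projection
const [q, k, v] = qkv(x, kernel, 8);             // 0.9.0 behaviour, float32 outputs
const [qp, kp, vp] = qkv(x, kernel, 8, true);    // 0.10.0: outputs tagged as packed fp16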
package/dist/ops/qkv.js
CHANGED
@@ -1,10 +1,14 @@
-import { e as …
+import { e as m } from "../index-ZyQhjEPo.js";
 import "./cpu/qkv.js";
 import "./webgl/qkv.js";
 import "./grads/qkv.js";
-…
-…
+import { packTensor as f } from "../utilities/packed.js";
+function l(n, t, e, r = !1) {
+  const o = m().runKernel("QKV", { x: n, kernel: t }, { heads: e, packed: r });
+  return r && o.forEach((i) => {
+    f(i);
+  }), o;
 }
 export {
-…
+  l as qkv
 };
package/dist/ops/reshape16.js
ADDED
@@ -0,0 +1,43 @@
+import { e as c } from "../index-ZyQhjEPo.js";
+import { isPackedTensor as u, packTensor as i } from "../utilities/packed.js";
+import { r as p } from "../reshape-DevtBWtf.js";
+import { a as l, r as t } from "../tensor_util-DV-FP5Q3.js";
+const m = {
+  kernelName: "Reshape16",
+  inputsToSave: ["x"],
+  gradFunc: (e, r) => {
+    const [n] = r;
+    if (Array.isArray(e))
+      throw new Error("Reshape16 gradient does not support multiple outputs.");
+    return { x: () => f(e, n.shape) };
+  }
+};
+l(m);
+function a(e) {
+  const { inputs: r, attrs: n } = e, { x: s } = r, { shape: o } = n;
+  return u(s) ? i(p(s, o)) : p(s, o);
+}
+const k = {
+  kernelName: "Reshape16",
+  backendName: "webgpu",
+  kernelFunc: a
+};
+t(k);
+const g = {
+  kernelName: "Reshape16",
+  backendName: "webgl",
+  kernelFunc: a
+};
+t(g);
+const h = {
+  kernelName: "Reshape16",
+  backendName: "cpu",
+  kernelFunc: a
+};
+t(h);
+function f(e, r) {
+  return c().runKernel("Reshape16", { x: e }, { shape: r });
+}
+export {
+  f as reshape16
+};
package/dist/ops/rope.d.ts
CHANGED
@@ -1,3 +1,3 @@
 import { default as RoPECache } from '../layers/RoPECache';
 import { Tensor } from '@tensorflow/tfjs';
-export declare function rope(x: Tensor, cache: RoPECache, pastLength: number): Tensor;
+export declare function rope(x: Tensor, cache: RoPECache, pastLength: number, negSin?: boolean): Tensor;
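The new optional negSin flag presumably applies the rotation with a negated sine table; the 0.9.0 gradient built this by hand (s.neg() in the old grads/rope.js above). A sketch under that assumption, with q a hypothetical query tensor and cache a RoPECache owned by the model:

const rotated = rope(q, cache, pastLength);
// Inverse rotation, as the gradient applies it; negSin replaces sin with -sin.
const restored = rope(rotated, cache, pastLength, true);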
package/dist/ops/rope.js
CHANGED
@@ -1,14 +1,12 @@
-import …
-import "../random_width-…
-import "../register_all_kernels-…
-import "../index-…
-import "../dataset-…
+import "../index-ZyQhjEPo.js";
+import "../random_width-DY6Kk2Dl.js";
+import "../register_all_kernels-Bwu1PTuU.js";
+import "../index-Cp39cXWe.js";
+import "../dataset-0xP8GjwI.js";
 import "./cpu/rope.js";
 import "./webgl/rope.js";
-import "…
-…
-  return o.ensureRopeCache(r.shape[1] + e), p().runKernel("Rope", { x: r, sin: o.getSin(), cos: o.getCos() }, { pastLen: e });
-}
+import { r as x } from "../rope-B5UUMsPi.js";
+import "../utilities/packed.js";
 export {
-…
+  x as rope
 };
package/dist/ops/scatterSub.js
CHANGED
package/dist/ops/slice16.js
ADDED
@@ -0,0 +1,9 @@
+import { isPackedTensor as n } from "../utilities/packed.js";
+import { e as c } from "../index-ZyQhjEPo.js";
+import { s as i } from "../slice-D_gkkqZK.js";
+function a(r, e, o) {
+  return n(r) ? c().runKernel("Slice16", { x: r }, { begin: e, size: o }) : i(r, e, o);
+}
+export {
+  a as slice16
+};
package/dist/ops/softmax16.js
ADDED
@@ -0,0 +1,12 @@
+import { e } from "../index-ZyQhjEPo.js";
+import "./grads/softmax16.js";
+import { isPackedTensor as m, packTensor as a } from "../utilities/packed.js";
+function p(r) {
+  if (!m(r))
+    return e().runKernel("Softmax", { logits: r }, { dim: r.rank - 1 });
+  const n = e().runKernel("Softmax16", { logits: r }, { dim: r.rank - 1 });
+  return m(r) ? a(n) : n;
+}
+export {
+  p as softmax16
+};
package/dist/ops/sum16.js
ADDED
@@ -0,0 +1,13 @@
+import { e as t } from "../index-ZyQhjEPo.js";
+import { isPackedTensor as s } from "../utilities/packed.js";
+import { s as n } from "../sum-_fzj5ZTB.js";
+function p(r, o, e = !1) {
+  if (!s(r))
+    return n(r, o, e);
+  if (e)
+    throw new Error("sum16 with keepDims=true not supported for packed tensors");
+  return t().runKernel("Sum16", { x: r }, { axis: o ?? -1, keepDims: e });
+}
+export {
+  p as sum16
+};
package/dist/ops/transpose16.js
ADDED
@@ -0,0 +1,41 @@
+import { e as i } from "../index-ZyQhjEPo.js";
+import { forceInt as u, forceFloat as l } from "./grads/utils.js";
+import { g as m } from "../axis_util-BvHEw88j.js";
+import { isPackedTensor as f, packTensor as g } from "../utilities/packed.js";
+import { t as a } from "../transpose-DKELTqhe.js";
+import { a as d, r as p } from "../tensor_util-DV-FP5Q3.js";
+const k = {
+  kernelName: "Transpose16",
+  gradFunc: (e, s, o) => {
+    if (Array.isArray(e))
+      throw new Error("Transpose16 gradient does not support multiple outputs.");
+    const n = o, { perm: r } = n, t = m(r);
+    return { x: () => w(e, t) };
+  }
+};
+d(k);
+function c(e) {
+  const { inputs: s, attrs: o } = e, { x: n } = s, { perm: r } = o, t = f(n);
+  if (t && r[r.length - 1] !== n.shape.length - 1)
+    throw new Error("Transpose16 currently only supports the last axis being unchanged.");
+  return t ? g(u(a(l(n), r))) : a(n, r);
+}
+const h = {
+  kernelName: "Transpose16",
+  backendName: "webgl",
+  kernelFunc: c
+};
+p(h);
+const T = {
+  kernelName: "Transpose16",
+  backendName: "cpu",
+  kernelFunc: c
+};
+p(T);
+function w(e, s) {
+  return s == null && (s = e.shape.map((n, r) => r).reverse()), i().runKernel("Transpose16", { x: e }, { perm: s });
+}
+export {
+  w as transpose16,
+  k as transpose16GradConfig
+};
package/dist/ops/webgl/adamAdjust.js
CHANGED
@@ -1,5 +1,6 @@
-import { r as n } from "../../Reshape-…
-import …
+import { r as n } from "../../Reshape-_kILl6tK.js";
+import "../../index-ZyQhjEPo.js";
+import { r as f } from "../../tensor_util-DV-FP5Q3.js";
 class v {
   variableNames = ["moments", "value"];
   outputShape;
package/dist/ops/webgl/attentionMask.js
CHANGED
@@ -1,4 +1,5 @@
-import …
+import "../../index-ZyQhjEPo.js";
+import { r as d } from "../../tensor_util-DV-FP5Q3.js";
 class h {
   variableNames = ["q", "k"];
   outputShape;
@@ -34,12 +35,12 @@ class h {
   }
 }
 function l(o) {
-  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], …
-  return a.runWebGLProgram(…
+  const { q: t, k: e } = o.inputs, { divisor: s, pastLen: n } = o.attrs, a = o.backend, i = t.shape[0], r = t.shape[2], c = e.shape[2], u = t.shape[1], p = t.shape[3], m = new h(i, u, r, c, p);
+  return a.runWebGLProgram(m, [t, e], "float32", [[s], [n], [Number.NEGATIVE_INFINITY]]);
 }
 const f = {
   kernelName: "AttentionMask",
   backendName: "webgl",
   kernelFunc: l
 };
-…
+d(f);
package/dist/ops/webgl/fusedSoftmax.js
CHANGED
@@ -1,7 +1,9 @@
-import { m as b, s as I, r as k } from "../../RealDiv-…
-import { r as v } from "../../Reshape-…
-import …
-import { …
+import { m as b, s as I, r as k } from "../../RealDiv-DgA3z9oO.js";
+import { r as v } from "../../Reshape-_kILl6tK.js";
+import "../../index-ZyQhjEPo.js";
+import { r as w } from "../../tensor_util-DV-FP5Q3.js";
+import { p as P } from "../../tensor-DdQUJZlz.js";
+import { e as S } from "../../axis_util-BvHEw88j.js";
 class T {
   variableNames = ["logits", "maxLogits"];
   outputShape;
package/dist/ops/webgl/gatherSub.js
CHANGED
@@ -1,5 +1,6 @@
-import …
-…
+import "../../index-ZyQhjEPo.js";
+import { r as i } from "../../tensor_util-DV-FP5Q3.js";
+class l {
   variableNames = ["labels", "logits", "values"];
   outputShape;
   userCode;
@@ -15,13 +16,13 @@ class u {
   `;
   }
 }
-function …
-  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new …
+function u(t) {
+  const { logits: e, labels: o, values: s } = t.inputs, a = t.backend, r = o.shape[0], n = new l(r);
   return a.runWebGLProgram(n, [o, e, s], "float32");
 }
 const c = {
   kernelName: "EfficientGatherSub",
   backendName: "webgl",
-  kernelFunc: …
+  kernelFunc: u
 };
-…
+i(c);
package/dist/ops/webgl/gelu.js
CHANGED
@@ -1,5 +1,6 @@
-import …
-import { u as s, C as i } from "../../kernel_funcs_utils-…
+import "../../index-ZyQhjEPo.js";
+import { u as s, C as i } from "../../kernel_funcs_utils-Dg_-E44D.js";
+import { r as a } from "../../tensor_util-DV-FP5Q3.js";
 const t = 0.7978845608028654, r = 0.044715, c = i + `
   float x3 = x * x * x;
   float inner = x + ${r} * x3;