@genai-fi/nanogpt 0.9.0 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +352 -14
- package/dist/Generator.js +69 -78
- package/dist/{RealDiv-D4EzDsC0.js → RealDiv-DgA3z9oO.js} +32 -206
- package/dist/Reshape-CF6odzV4.js +16 -0
- package/dist/Reshape-_kILl6tK.js +81 -0
- package/dist/TeachableLLM.js +28 -22
- package/dist/Trainer.d.ts +2 -0
- package/dist/Trainer.js +3 -2
- package/dist/{axis_util-TbGYJ208.js → axis_util-BvHEw88j.js} +7 -23
- package/dist/backend.d.ts +2 -1
- package/dist/backend.js +10 -4
- package/dist/backend_util-D-rUb2ty.js +474 -0
- package/dist/backend_webgpu-B0u2ndUn.js +547 -0
- package/dist/binary_op_util-pKXltfxI.js +192 -0
- package/dist/broadcast_to-CwF7XIeu.js +30 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/check.d.ts +1 -1
- package/dist/checks/check.js +8 -8
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/index.d.ts +2 -0
- package/dist/checks/index.js +7 -5
- package/dist/checks/matMulGelu.js +6 -6
- package/dist/checks/normRMS.js +7 -7
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.d.ts +1 -0
- package/dist/checks/packUnpack.js +18 -0
- package/dist/checks/qkv.js +12 -27
- package/dist/checks/rope.js +2 -2
- package/dist/checks/weights.js +18 -16
- package/dist/complex-CSlYz-2T.js +13 -0
- package/dist/complex_util-Yc1A_gV1.js +55 -0
- package/dist/concat-BHlIJeyT.js +19 -0
- package/dist/concat_util-DcJk7YHS.js +22 -0
- package/dist/data/docx.js +1 -1
- package/dist/data/parquet.js +2 -2
- package/dist/data/pdf.js +1 -1
- package/dist/data/textLoader.js +1 -1
- package/dist/{dataset-DlZtKmBq.js → dataset-0xP8GjwI.js} +136 -236
- package/dist/dropout-C1pM3f11.js +99 -0
- package/dist/expand_dims-BPG4fwBP.js +13 -0
- package/dist/exports_initializers-xuidcwI4.js +7 -0
- package/dist/gather-DykLGqmW.js +10 -0
- package/dist/{gelu-Bp_-935b.js → gelu-CNLFZWea.js} +11 -10
- package/dist/{gpgpu_math-CDaYiyE_.js → gpgpu_math-DDVJCn6-.js} +90 -265
- package/dist/{index-C4L8Cm77.js → index-CieiGp4Y.js} +14 -14
- package/dist/index-CjOj7j-u.js +7308 -0
- package/dist/{index-Tf7vU29b.js → index-Cp39cXWe.js} +3 -10
- package/dist/{index-Dwqa6Zy2.js → index-DvYrXKkX.js} +2 -2
- package/dist/index-ZyQhjEPo.js +2157 -0
- package/dist/{jszip.min-CjP2V1VV.js → jszip.min-Bz5-11Bk.js} +56 -57
- package/dist/kernel_funcs_utils-Dg_-E44D.js +308 -0
- package/dist/layers/BaseLayer.d.ts +1 -0
- package/dist/layers/BaseLayer.js +7 -6
- package/dist/layers/CausalSelfAttention.d.ts +0 -1
- package/dist/layers/CausalSelfAttention.js +56 -55
- package/dist/layers/MLP.js +15 -16
- package/dist/layers/PositionEmbedding.js +5 -14
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.d.ts +2 -0
- package/dist/layers/RoPECache.js +22 -17
- package/dist/layers/TiedEmbedding.js +22 -17
- package/dist/layers/TransformerBlock.js +21 -20
- package/dist/loader/load.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +39 -33
- package/dist/loader/save.js +1 -1
- package/dist/log_sum_exp-DWI-76TI.js +41 -0
- package/dist/main.d.ts +8 -0
- package/dist/main.js +63 -52
- package/dist/matMul16--R5hOwDG.js +77 -0
- package/dist/mat_mul-DeAh4uTH.js +12 -0
- package/dist/mod-Gt1rMB4n.js +12 -0
- package/dist/models/NanoGPTV1.js +40 -31
- package/dist/models/model.d.ts +2 -0
- package/dist/models/model.js +37 -29
- package/dist/{mulmat_packed_gpu-BT60jmzP.js → mulmat_packed_gpu-BMFhLwta.js} +1 -17
- package/dist/{non_max_suppression_impl-CsEgBuMA.js → non_max_suppression_impl-B2W7YjZB.js} +0 -32
- package/dist/ones-CAMiP4I2.js +15 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.d.ts +1 -1
- package/dist/ops/adamMoments.js +4 -4
- package/dist/ops/add16.d.ts +2 -0
- package/dist/ops/add16.js +9 -0
- package/dist/ops/appendCache.js +16 -9
- package/dist/ops/attentionMask.js +4 -4
- package/dist/ops/concat16.d.ts +2 -0
- package/dist/ops/concat16.js +9 -0
- package/dist/ops/cpu/adamAdjust.js +14 -13
- package/dist/ops/cpu/adamMoments.js +10 -9
- package/dist/ops/cpu/appendCache.js +9 -8
- package/dist/ops/cpu/attentionMask.js +15 -14
- package/dist/ops/cpu/fusedSoftmax.js +13 -12
- package/dist/ops/cpu/gatherSub.js +9 -24
- package/dist/ops/cpu/gelu.js +13 -12
- package/dist/ops/cpu/matMul16.d.ts +1 -0
- package/dist/ops/cpu/matMul16.js +16 -0
- package/dist/ops/cpu/matMulGelu.js +18 -16
- package/dist/ops/cpu/matMulMul.js +8 -7
- package/dist/ops/cpu/mulDropout.js +4 -3
- package/dist/ops/cpu/normRMS.js +11 -10
- package/dist/ops/cpu/qkv.js +17 -13
- package/dist/ops/cpu/rope.js +23 -22
- package/dist/ops/cpu/scatterSub.js +16 -30
- package/dist/ops/dot16.d.ts +2 -0
- package/dist/ops/dot16.js +42 -0
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.d.ts +1 -0
- package/dist/ops/grads/add16.js +27 -0
- package/dist/ops/grads/attentionMask.js +12 -19
- package/dist/ops/grads/gelu.js +4 -3
- package/dist/ops/grads/matMul16.d.ts +2 -0
- package/dist/ops/grads/matMul16.js +9 -0
- package/dist/ops/grads/matMulGelu.js +8 -7
- package/dist/ops/grads/normRMS.js +8 -7
- package/dist/ops/grads/{fusedSoftmax.d.ts → pack16.d.ts} +1 -1
- package/dist/ops/grads/pack16.js +7 -0
- package/dist/ops/grads/qkv.d.ts +3 -1
- package/dist/ops/grads/qkv.js +28 -22
- package/dist/ops/grads/rope.d.ts +2 -1
- package/dist/ops/grads/rope.js +6 -13
- package/dist/ops/grads/softmax16.d.ts +2 -0
- package/dist/ops/grads/softmax16.js +26 -0
- package/dist/ops/grads/unpack16.d.ts +2 -0
- package/dist/ops/grads/unpack16.js +6 -0
- package/dist/ops/grads/utils.d.ts +3 -0
- package/dist/ops/grads/utils.js +10 -0
- package/dist/ops/matMul16.d.ts +15 -0
- package/dist/ops/matMul16.js +13 -0
- package/dist/ops/matMulGelu.js +1 -1
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.d.ts +2 -0
- package/dist/ops/mul16.js +8 -0
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.d.ts +2 -0
- package/dist/ops/pack16.js +6 -0
- package/dist/ops/qkv.d.ts +1 -1
- package/dist/ops/qkv.js +8 -4
- package/dist/ops/reshape16.d.ts +2 -0
- package/dist/ops/reshape16.js +43 -0
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +8 -10
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.d.ts +2 -0
- package/dist/ops/slice16.js +9 -0
- package/dist/ops/softmax16.d.ts +2 -0
- package/dist/ops/softmax16.js +12 -0
- package/dist/ops/sub16.d.ts +2 -0
- package/dist/ops/sub16.js +8 -0
- package/dist/ops/sum16.d.ts +2 -0
- package/dist/ops/sum16.js +13 -0
- package/dist/ops/transpose16.d.ts +3 -0
- package/dist/ops/transpose16.js +41 -0
- package/dist/ops/unpack16.d.ts +2 -0
- package/dist/ops/unpack16.js +6 -0
- package/dist/ops/webgl/adamAdjust.js +3 -2
- package/dist/ops/webgl/adamMoments.js +2 -1
- package/dist/ops/webgl/appendCache.js +2 -1
- package/dist/ops/webgl/attentionMask.js +5 -4
- package/dist/ops/webgl/fusedSoftmax.js +6 -4
- package/dist/ops/webgl/gatherSub.js +7 -6
- package/dist/ops/webgl/gelu.js +3 -2
- package/dist/ops/webgl/log.js +12 -27
- package/dist/ops/webgl/matMul16.d.ts +1 -0
- package/dist/ops/webgl/matMul16.js +37 -0
- package/dist/ops/webgl/matMulGelu.js +17 -15
- package/dist/ops/webgl/matMulMul.js +13 -12
- package/dist/ops/webgl/mulDropout.js +9 -8
- package/dist/ops/webgl/normRMS.js +8 -7
- package/dist/ops/webgl/qkv.js +6 -5
- package/dist/ops/webgl/rope.js +11 -10
- package/dist/ops/webgl/scatterSub.js +6 -5
- package/dist/ops/webgpu/adamAdjust.js +12 -10
- package/dist/ops/webgpu/adamMoments.js +27 -22
- package/dist/ops/webgpu/add16.d.ts +1 -0
- package/dist/ops/webgpu/add16.js +14 -0
- package/dist/ops/webgpu/appendCache.js +64 -17
- package/dist/ops/webgpu/attentionMask.js +19 -62
- package/dist/ops/webgpu/attentionMask32_program.d.ts +19 -0
- package/dist/ops/webgpu/attentionMask32_program.js +54 -0
- package/dist/ops/webgpu/concat16.d.ts +19 -0
- package/dist/ops/webgpu/concat16.js +128 -0
- package/dist/ops/webgpu/gatherSub.js +9 -7
- package/dist/ops/webgpu/gelu.js +78 -31
- package/dist/ops/webgpu/index.js +12 -0
- package/dist/ops/webgpu/matMul16.d.ts +1 -0
- package/dist/ops/webgpu/matMul16.js +58 -0
- package/dist/ops/webgpu/matMul16_program.d.ts +42 -0
- package/dist/ops/webgpu/matMul16_program.js +336 -0
- package/dist/ops/webgpu/mul16.d.ts +1 -0
- package/dist/ops/webgpu/mul16.js +14 -0
- package/dist/ops/webgpu/normRMS.js +21 -40
- package/dist/ops/webgpu/normRMS16_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS16_program.js +24 -0
- package/dist/ops/webgpu/normRMS32_program.d.ts +9 -0
- package/dist/ops/webgpu/normRMS32_program.js +24 -0
- package/dist/ops/webgpu/normRMSGrad.js +113 -64
- package/dist/ops/webgpu/pack16.d.ts +1 -0
- package/dist/ops/webgpu/pack16.js +19 -0
- package/dist/ops/webgpu/pack16_program.d.ts +19 -0
- package/dist/ops/webgpu/pack16_program.js +92 -0
- package/dist/ops/webgpu/qkv.js +20 -55
- package/dist/ops/webgpu/rope.js +77 -22
- package/dist/ops/webgpu/scatterSub.js +9 -7
- package/dist/ops/webgpu/slice16.d.ts +7 -0
- package/dist/ops/webgpu/slice16.js +71 -0
- package/dist/{variable-Bm2OFwGI.js → ops/webgpu/softmax16.d.ts} +2 -8
- package/dist/ops/webgpu/softmax16.js +23 -0
- package/dist/ops/webgpu/softmax16_program.d.ts +13 -0
- package/dist/ops/webgpu/softmax16_program.js +73 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.d.ts +17 -0
- package/dist/ops/webgpu/softmax16_subgroup_program.js +75 -0
- package/dist/ops/webgpu/softmax16grad.d.ts +1 -0
- package/dist/ops/webgpu/softmax16grad.js +38 -0
- package/dist/ops/webgpu/sub16.d.ts +1 -0
- package/dist/ops/webgpu/sub16.js +14 -0
- package/dist/ops/webgpu/sum16.d.ts +1 -0
- package/dist/ops/webgpu/sum16.js +40 -0
- package/dist/ops/webgpu/transpose16.d.ts +1 -0
- package/dist/ops/webgpu/transpose16.js +35 -0
- package/dist/ops/webgpu/transpose16_program.d.ts +16 -0
- package/dist/ops/webgpu/transpose16_program.js +50 -0
- package/dist/ops/webgpu/transpose16_shared_program.d.ts +15 -0
- package/dist/ops/webgpu/transpose16_shared_program.js +71 -0
- package/dist/ops/webgpu/unpack16.d.ts +1 -0
- package/dist/ops/webgpu/unpack16.js +49 -0
- package/dist/ops/webgpu/utils/binary_op.d.ts +19 -0
- package/dist/ops/webgpu/utils/binary_op.js +79 -0
- package/dist/ops/webgpu/utils/deviceInfo.d.ts +7 -0
- package/dist/ops/webgpu/utils/deviceInfo.js +11 -0
- package/dist/ops/webgpu/utils/reductions.d.ts +32 -4
- package/dist/ops/webgpu/utils/reductions.js +236 -45
- package/dist/ops-CNI3TwqM.js +645 -0
- package/dist/pack16-CFUqumar.js +41 -0
- package/dist/{papaparse.min-C8l2Kvo1.js → papaparse.min-C0cScC2i.js} +2 -8
- package/dist/{parquet-C0Tlmv9c.js → parquet-BE8MU_ge.js} +201 -278
- package/dist/patches/PackedTensor.d.ts +12 -0
- package/dist/patches/PackedTensor.js +11 -0
- package/dist/patches/engine.d.ts +261 -0
- package/dist/patches/engine.js +10 -0
- package/dist/patches/tape.d.ts +12 -0
- package/dist/patches/tape.js +5 -0
- package/dist/patches/webgpu_backend.d.ts +18 -0
- package/dist/patches/webgpu_backend.js +57 -0
- package/dist/{tensor-CZr4dh61.js → patches/webgpu_base.d.ts} +5 -8
- package/dist/patches/webgpu_base.js +34 -0
- package/dist/patches/webgpu_program.d.ts +36 -0
- package/dist/patches/webgpu_program.js +401 -0
- package/dist/{pdf-kJD-f258.js → pdf-NIhmP3sq.js} +424 -428
- package/dist/random_width-DY6Kk2Dl.js +10051 -0
- package/dist/range-BMS52eQi.js +11 -0
- package/dist/reciprocal-CTmshQ9J.js +10 -0
- package/dist/{register_all_kernels-DIGpEwcf.js → register_all_kernels-Bwu1PTuU.js} +719 -9766
- package/dist/relu-yZ2-7WxU.js +10 -0
- package/dist/reshape-DevtBWtf.js +10 -0
- package/dist/rope-B5UUMsPi.js +32 -0
- package/dist/{scatter_nd_util-BQdz--Gn.js → scatter_nd_util-5EL-8VAQ.js} +1 -1
- package/dist/selu_util-D1w6yyTO.js +303 -0
- package/dist/{shared-DuP7ue-R.js → shared-BRksrJb3.js} +1 -17
- package/dist/shared-BuAXb4CI.js +2145 -0
- package/dist/sin-BGfy2HZo.js +16 -0
- package/dist/slice-D_gkkqZK.js +13 -0
- package/dist/slice_util-DtEldBfK.js +261 -0
- package/dist/softmax-ZHVebtR1.js +13 -0
- package/dist/split-DrfihRpZ.js +10 -0
- package/dist/squeeze-DZEpeblb.js +11 -0
- package/dist/stack-yOIAalTq.js +13 -0
- package/dist/sum-_fzj5ZTB.js +12 -0
- package/dist/tensor-DdQUJZlz.js +909 -0
- package/dist/tensor-f35l8Odg.js +8 -0
- package/dist/tensor1d-CeZuc-Rv.js +12 -0
- package/dist/tensor2d-G4Ys2GxX.js +15 -0
- package/dist/tensor4d-B8roDgtc.js +15 -0
- package/dist/tensor_util-DV-FP5Q3.js +523 -0
- package/dist/tfjs_backend-kNyO5L2d.js +653 -0
- package/dist/tile-BzyEiF-F.js +13 -0
- package/dist/tokeniser/CharTokeniser.js +1 -1
- package/dist/tokeniser/bpe.js +1 -1
- package/dist/training/Adam.d.ts +2 -1
- package/dist/training/Adam.js +12 -28
- package/dist/training/AdamExt.d.ts +1 -0
- package/dist/training/AdamExt.js +2 -2
- package/dist/training/DatasetBuilder.js +3 -20
- package/dist/training/FullTrainer.js +82 -64
- package/dist/training/Trainer.d.ts +11 -6
- package/dist/training/Trainer.js +51 -39
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/transpose-DKELTqhe.js +38 -0
- package/dist/utilities/arrayClose.js +7 -7
- package/dist/utilities/dummy.js +35 -27
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +7 -0
- package/dist/utilities/packed.js +716 -0
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +5 -0
- package/dist/utilities/sentences.js +41 -0
- package/dist/utilities/weights.js +2 -2
- package/dist/variable-Bhn5bHYv.js +7 -0
- package/dist/{webgpu_program-DkQJOJSd.js → webgpu_program-Cigz-7RF.js} +15 -44
- package/dist/webgpu_util-BBCnKm2X.js +65 -0
- package/dist/zeros-2gldETuK.js +14 -0
- package/package.json +4 -3
- package/dist/Reshape-Bowtk9BP.js +0 -127
- package/dist/Reshape-DUqYftGC.js +0 -30
- package/dist/backend_util-CJIiDoV1.js +0 -749
- package/dist/broadcast_to-DzlNweb8.js +0 -44
- package/dist/concat-B912vBbo.js +0 -33
- package/dist/dropout-C-csYCLj.js +0 -193
- package/dist/exports_initializers-B8iZMgQ0.js +0 -16
- package/dist/gather-Dnpgw-YQ.js +0 -25
- package/dist/index-BzFyqcy-.js +0 -4457
- package/dist/index-C1rx_Ajs.js +0 -12076
- package/dist/kernel_funcs_utils-DKLK0Mg3.js +0 -466
- package/dist/log_sum_exp-DO6z8tSE.js +0 -103
- package/dist/mat_mul-DzjTFx-u.js +0 -27
- package/dist/mod-Dobti4j4.js +0 -27
- package/dist/ones-tIJeHlq-.js +0 -29
- package/dist/ops/fusedSoftmax.d.ts +0 -2
- package/dist/ops/fusedSoftmax.js +0 -10
- package/dist/ops/grads/fusedSoftmax.js +0 -22
- package/dist/ops-LuCMAnmM.js +0 -1525
- package/dist/random_width-CXVRloNK.js +0 -13670
- package/dist/range-CWcz7xFA.js +0 -26
- package/dist/reciprocal-C4rNcM-S.js +0 -25
- package/dist/relu-BjCh_SYb.js +0 -25
- package/dist/reshape-CnIwVG1c.js +0 -25
- package/dist/selu_util-OtRzVwW5.js +0 -719
- package/dist/shared-DmRsFyaJ.js +0 -3134
- package/dist/sin-gpDNRxE0.js +0 -47
- package/dist/slice-d0Vo9XTN.js +0 -28
- package/dist/softmax-D7Jj3p_P.js +0 -28
- package/dist/split-DK2k5eHf.js +0 -25
- package/dist/stack-DFatutCx.js +0 -27
- package/dist/sum-CJ0ULhmt.js +0 -27
- package/dist/tensor1d-vML0r3q6.js +0 -27
- package/dist/tensor2d-D76QGjF3.js +0 -30
- package/dist/tensor4d-Df1WlVDY.js +0 -30
- package/dist/webgpu_util-pLEV9tks.js +0 -80
- package/dist/zeros-Bj5rMYA7.js +0 -52
package/dist/ops/webgpu/normRMSGrad.js
CHANGED
@@ -1,10 +1,15 @@
-import {
-import { createReduceInfo as
-import { f as
-import {
-import {
-import {
-
+import { e as _ } from "../../index-ZyQhjEPo.js";
+import { createReduceInfo as D } from "./utils/reductions.js";
+import { f as X } from "../../webgpu_util-BBCnKm2X.js";
+import { e as $ } from "../../webgpu_program-Cigz-7RF.js";
+import { j as z } from "../../tensor-DdQUJZlz.js";
+import { p as k, u as M } from "../../pack16-CFUqumar.js";
+import { isPackedTensor as h } from "../../utilities/packed.js";
+import { reshape16 as R } from "../reshape16.js";
+import { sum16 as L } from "../sum16.js";
+import { slice16 as w } from "../slice16.js";
+import { r as P } from "../../tensor_util-DV-FP5Q3.js";
+class N {
   outputShape;
   shaderKey = "RMSNormGrad";
   dispatchLayout;
@@ -15,53 +20,96 @@ class y {
   inputShape;
   size = !1;
   rowsPerWorkgroup;
-
-
+  packed = !1;
+  outputComponent;
+  constructor(a, e = 4, o = !1) {
+    if (this.packed = o, this.shaderKey = `RMSNormGrad_${e}`, this.rowsPerWorkgroup = e, this.inputShape = [a.batchSize, a.inSize], this.outputShape = [a.batchSize + a.batchSize / this.rowsPerWorkgroup, a.inSize], this.dispatchLayout = X(this.outputShape), this.dispatch = [a.batchSize / this.rowsPerWorkgroup, 1, 1], a.batchSize % this.rowsPerWorkgroup !== 0)
       throw new Error(
-        `RMSNormGradProgram: batch size ${
+        `RMSNormGradProgram: batch size ${a.batchSize} must be divisible by rowsPerWorkgroup ${this.rowsPerWorkgroup}`
       );
-    if (
-      throw new Error(`RMSNormGradProgram: inSize ${
+    if (a.inSize > 1024)
+      throw new Error(`RMSNormGradProgram: inSize ${a.inSize} exceeds max of 1024`);
   }
   getUserCode() {
-    const
+    const a = this.workgroupSize[0], e = this.rowsPerWorkgroup, o = `
+      var<workgroup> partials : array<vec2<f32>, ${a}>;
+      var<workgroup> accumulation: array<${this.packed ? "vec2<f32>" : "f32"}, 1024>;
+    `, n = this.packed ? `
+      let X = unpack2x16float(u32(x[offset + k]));
+      let DY = unpack2x16float(u32(dy[offset + k]));
+      let G = unpack2x16float(u32(gamma[k]));
+      sum_x2 = fma(X.x, X.x, sum_x2);
+      sum_x2 = fma(X.y, X.y, sum_x2);
+      sum_dygx = fma(DY.x * G.x, X.x, sum_dygx);
+      sum_dygx = fma(DY.y * G.y, X.y, sum_dygx);
+    ` : `
+      let X = f32(x[offset + k]);
+      let DY = f32(dy[offset + k]);
+      let G = f32(gamma[k]);
+      sum_x2 = fma(X, X, sum_x2);
+      sum_dygx = fma(DY * G, X, sum_dygx);
+    `, s = this.packed ? `
+      let X = unpack2x16float(u32(x[offset + k]));
+      let DY = unpack2x16float(u32(dy[offset + k]));
+      let G = unpack2x16float(u32(gamma[k]));
+
+      let dyGamma = DY * G;
+      let dx = vec2<f32>(
+        fma(dyGamma.x, invRMS, -X.x * scale),
+        fma(dyGamma.y, invRMS, -X.y * scale)
+      );
+
+      result[offset + k] = i32(pack2x16float(dx));
+
+      // dGamma
+      accumulation[k] = fma(DY, X * invRMS, accumulation[k]);
+    ` : `
+      let X = f32(x[offset + k]);
+      let DY = f32(dy[offset + k]);
+      let G = f32(gamma[k]);
+
+      let dyGamma = DY * G;
+      let dx = fma(dyGamma, invRMS, -X * scale);
+
+      result[offset + k] = dx;
+
+      // dGamma
+      accumulation[k] = fma(DY, X * invRMS, accumulation[k]);
+    `, i = this.packed ? `
+      result[outDgBase + k] = i32(pack2x16float(accumulation[k]));
+    ` : `
+      result[outDgBase + k] = accumulation[k];
+    `;
     return `
     fn DIV_CEIL(a : u32, b : u32) -> u32 {
       return ((a - 1u) / b + 1u);
     }

-    ${
-      var<workgroup> partials : array<vec2<f32>, ${r}>;
-      var<workgroup> accumulation: array<f32, 1024>;
-    `}
+    ${o}

-    ${
+    ${$("index")} {
      // One workgroup per row (batch).
      let Length = uniforms.reduceSize;
      let BatchSize = uniforms.batchSize;
-     for (var k = i32(localId.x); k < Length; k = k + ${
-       accumulation[k] = 0.0;
+     for (var k = i32(localId.x); k < Length; k = k + ${a}) {
+       accumulation[k] = ${this.packed ? "vec2<f32>(0.0f)" : "0.0f"};
      }

      for (var rowOff = 0; rowOff < ${e}; rowOff = rowOff + 1) {
        let row = i32(workgroupId.x) * ${e} + rowOff;
        let offset = row * Length;

-       var sum_x2 = 0.
-       var sum_dygx = 0.
-
-       for (var k = i32(localId.x); k < Length; k = k + ${
-
-         let DY = f32(dy[offset + k]);
-         let G = f32(gamma[k]);
-         sum_x2 = fma(X, X, sum_x2);
-         sum_dygx = fma(DY * G, X, sum_dygx);
+       var sum_x2 = 0.0f;
+       var sum_dygx = 0.0f;
+
+       for (var k = i32(localId.x); k < Length; k = k + ${a}) {
+         ${n}
        }

        partials[localId.x] = vec2<f32>(sum_x2, sum_dygx);
        workgroupBarrier();

-       var reduceSize = min(u32(Length), ${
+       var reduceSize = min(u32(Length), ${a}u);
        for (var currentSize = reduceSize / 2u; reduceSize > 1u; currentSize = reduceSize / 2u) {
          let interval = DIV_CEIL(reduceSize, 2u);
          if (localId.x < currentSize) {
@@ -71,7 +119,7 @@ class y {
          workgroupBarrier();
        }

-       let invN = 1.
+       let invN = 1.0f / f32(${this.packed ? "Length * 2" : "Length"});
        let mean_x2 = fma(partials[0].x, invN, 1e-8);
        let mean_dygx = partials[0].y * invN;

@@ -79,18 +127,8 @@ class y {
        let scale = (mean_dygx / (mean_x2)) * invRMS;

        // write dx and dGamma.
-       for (var k = i32(localId.x); k < Length; k = k + ${
-
-         let DY = f32(dy[offset + k]);
-         let G = f32(gamma[k]);
-
-         let dyGamma = DY * G;
-         let dx = fma(dyGamma, invRMS, -X * scale);
-
-         result[offset + k] = dx;
-
-         // dGamma
-         accumulation[k] = fma(DY, X * invRMS, accumulation[k]);
+       for (var k = i32(localId.x); k < Length; k = k + ${a}) {
+         ${s}
        }

        workgroupBarrier();
@@ -98,36 +136,47 @@ class y {

      // Write out the partially accumulated dGamma
      let outDgBase = BatchSize * Length + i32(workgroupId.x) * Length;
-     for (var k = i32(localId.x); k < Length; k = k + ${
-
+     for (var k = i32(localId.x); k < Length; k = k + ${a}) {
+       ${i}
      }
    }
  `;
   }
 }
-function
-  const { dy:
-
-  const
-
+function W(p) {
+  const { dy: a, x: e, gamma: o } = p.inputs, n = 4;
+  z(e.shape, a.shape, "Error in RMSNormGrad dy: ");
+  const s = h(e), i = h(o), u = h(a), r = s || i || u, m = !r || s ? e : k(e), c = !r || i ? o : k(o), d = !r || u ? a : k(a);
+  z(c.shape, [m.shape[m.shape.length - 1]], "Error in RMSNormGrad gamma: ");
+  const G = p.backend, t = D([m, c, d], -1), f = new N(t, n, r), v = [
+    { type: "int32", data: [f.inputShape[1]] },
     // Reduce size
-    { type: "int32", data: [
+    { type: "int32", data: [f.inputShape[0]] }
     // Batch size
   ];
-  if (
-    throw new Error(`rmsNormGradGPU: inSize ${
-  const
-
-
-
-
-
-
-
+  if (t.inSize > 1024)
+    throw new Error(`rmsNormGradGPU: inSize ${t.inSize} exceeds max of 1024`);
+  const x = G.runWebGPUProgram(
+    f,
+    [m, c, d],
+    r ? "int32" : "float32",
+    v
+  );
+  x.packed = r, r && !s && m.dispose(), r && !i && c.dispose(), r && !u && d.dispose();
+  const l = _().makeTensorFromTensorInfo(x), S = w(l, [0, 0], [t.batchSize, t.inSize]), g = w(
+    l,
+    [t.batchSize, 0],
+    [t.batchSize / n, t.inSize]
+  );
+  l.dispose();
+  const b = R(S, e.shape);
+  S.dispose();
+  const y = L(g, [0]);
+  return g.dispose(), [b, !r || i ? y : M(y)];
 }
-const
+const Y = {
   kernelName: "RMSNormGrad",
   backendName: "webgpu",
-  kernelFunc:
+  kernelFunc: W
 };
-
+P(Y);
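The rewrite above teaches the RMSNorm gradient kernel to run on fp16-packed tensors (two half-precision values per i32 element, via pack2x16float / unpack2x16float) as well as plain f32, and to emit per-workgroup dGamma partials that are sliced and summed afterwards with slice16 / sum16. As a reading aid, here is a scalar sketch of the per-row math the shader implements, assuming invRMS = inverseSqrt(mean_x2) as in the surrounding f32 path; the function name is illustrative, not part of the package:

```ts
// Hypothetical scalar reference for one row of the RMSNormGrad shader above.
function rmsNormGradRow(
  x: Float32Array, dy: Float32Array, gamma: Float32Array,
  dx: Float32Array, dGamma: Float32Array
): void {
  const n = x.length;
  let sumX2 = 0, sumDyGX = 0;
  for (let k = 0; k < n; k++) {
    sumX2 += x[k] * x[k];                 // partials[...].x in the shader
    sumDyGX += dy[k] * gamma[k] * x[k];   // partials[...].y
  }
  const meanX2 = sumX2 / n + 1e-8;        // fma(partials[0].x, invN, 1e-8)
  const meanDyGX = sumDyGX / n;
  const invRMS = 1 / Math.sqrt(meanX2);   // assumed, matching the f32 kernel
  const scale = (meanDyGX / meanX2) * invRMS;
  for (let k = 0; k < n; k++) {
    dx[k] = dy[k] * gamma[k] * invRMS - x[k] * scale; // fma(dyGamma, invRMS, -X * scale)
    dGamma[k] += dy[k] * x[k] * invRMS;               // accumulated, reduced over rows later
  }
}
```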
package/dist/ops/webgpu/pack16.d.ts
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/ops/webgpu/pack16.js
ADDED
@@ -0,0 +1,19 @@
+import "../../index-ZyQhjEPo.js";
+import c from "./pack16_program.js";
+import { r as p } from "../../tensor_util-DV-FP5Q3.js";
+function m(n) {
+  const { x: e } = n.inputs, { scaling: t, padding: r } = n.attrs, i = n.backend;
+  if (e.shape[e.shape.length - 1] % 2 !== 0)
+    throw new Error("Last dimension of input tensor must be even to use Pack16.");
+  n.attrs && (n.attrs.originalShape = e.shape);
+  const a = new c(e.shape, r), o = t !== 1;
+  o && a.useScaling();
+  const s = [{ type: "float32", data: [t] }];
+  return i.runWebGPUProgram(a, [e], "int32", o ? s : void 0);
+}
+const u = {
+  kernelName: "Pack16",
+  backendName: "webgpu",
+  kernelFunc: m
+};
+p(u);
package/dist/ops/webgpu/pack16_program.d.ts
ADDED
@@ -0,0 +1,19 @@
+import { WebGPUProgram } from '@tensorflow/tfjs-backend-webgpu';
+export default class PackProgram implements WebGPUProgram {
+    outputShape: number[];
+    shaderKey: string;
+    dispatchLayout: {
+        x: number[];
+    };
+    dispatch: [number, number, number];
+    workgroupSize: [number, number, number];
+    variableNames: string[];
+    uniforms?: string;
+    size: boolean;
+    outputComponent: number;
+    scaling: boolean;
+    padding: number;
+    constructor(outShape: number[], padding?: number);
+    useScaling(): void;
+    getUserCode(): string;
+}
package/dist/ops/webgpu/pack16_program.js
ADDED
@@ -0,0 +1,92 @@
+import { f as o, c as a } from "../../webgpu_util-BBCnKm2X.js";
+import { e as s } from "../../webgpu_program-Cigz-7RF.js";
+class h {
+  outputShape;
+  shaderKey = "Pack16";
+  dispatchLayout;
+  dispatch;
+  workgroupSize = [64, 1, 1];
+  variableNames = ["x"];
+  uniforms;
+  size = !0;
+  outputComponent = 4;
+  scaling = !1;
+  padding = 0;
+  constructor(t, e = 0) {
+    if (t[t.length - 1] % 2 !== 0 && e === 0)
+      throw new Error("Last dimension of output shape must be even to use Pack16.");
+    if (e % 4 !== 0)
+      throw new Error("Padding must be a multiple of 4 to use Pack16.");
+    if (this.outputShape = [...t.slice(0, -1), t[t.length - 1]], e > 0) {
+      this.shaderKey += `_Padded${e}`, this.padding = e;
+      for (let i = this.outputShape.length - 2; i < this.outputShape.length; i++)
+        this.outputShape[i] % this.padding !== 0 && (this.outputShape[i] += this.padding - this.outputShape[i] % this.padding);
+      this.outputComponent = 1;
+    }
+    this.outputShape[this.outputShape.length - 1] /= 2, this.outputShape[this.outputShape.length - 1] % this.outputComponent !== 0 && (this.outputComponent = 1), this.dispatchLayout = o(this.outputShape), this.dispatch = a(this.dispatchLayout, this.outputShape, this.workgroupSize, [
+      this.outputComponent,
+      1,
+      1
+    ]);
+  }
+  useScaling() {
+    this.shaderKey += "_Scaled", this.uniforms = "scaling : f32,", this.scaling = !0;
+  }
+  getUserCode() {
+    if (this.padding > 0 && this.outputComponent === 1) {
+      const t = this.outputShape.length;
+      return `
+      ${s("index")} {
+        if (index < uniforms.size) {
+          var coords = getCoordsFromIndex(index);
+          coords[${t} - 1] = coords[${t} - 1] * 2;
+          let row = coords[${t} - 2];
+          let col = coords[${t} - 1];
+          let width = uniforms.xShape[${t} - 1];
+          let height = uniforms.xShape[${t} - 2];

+          var value1 = 0.0f;
+          if (col < width && row < height) {
+            let baseInputIndex = getIndexFromCoords${t}D(coords, uniforms.xShape);
+            value1 = x[baseInputIndex] ${this.scaling ? "* uniforms.scaling" : ""};
+          }
+          var value2 = 0.0f;
+          if (col + 1 < width && row < height) {
+            coords[${t} - 1] = coords[${t} - 1] + 1;
+            let baseInputIndex = getIndexFromCoords${t}D(coords, uniforms.xShape);
+            value2 = x[baseInputIndex] ${this.scaling ? "* uniforms.scaling" : ""};
+          }
+          let packed = i32(pack2x16float(vec2<f32>(value1, value2)));
+          result[index] = packed;
+        }
+      }`;
+    }
+    return this.outputComponent === 1 ? `
+      ${s("index")} {
+        if (index < uniforms.size) {
+          let baseInputIndex = index * 2;
+          let x1 = x[baseInputIndex] ${this.scaling ? "* uniforms.scaling" : ""};
+          let x2 = x[baseInputIndex + 1] ${this.scaling ? "* uniforms.scaling" : ""};
+          let packed = i32(pack2x16float(vec2<f32>(x1, x2)));
+          result[index] = packed;
+        }
+      }` : `
+      ${s("index")} {
+        if (index < uniforms.size) {
+          let baseInputIndex = index * 2;
+          let x1 = x[baseInputIndex] ${this.scaling ? "* uniforms.scaling" : ""};
+          let x2 = x[baseInputIndex + 1] ${this.scaling ? "* uniforms.scaling" : ""};
+          let packed = vec4<i32>(
+            i32(pack2x16float(vec2<f32>(x1.x, x1.y))),
+            i32(pack2x16float(vec2<f32>(x1.z, x1.w))),
+            i32(pack2x16float(vec2<f32>(x2.x, x2.y))),
+            i32(pack2x16float(vec2<f32>(x2.z, x2.w)))
+          );
+          result[index] = packed;
+        }
+      }`;
+  }
+}
+export {
+  h as default
+};
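Pack16 halves the last dimension by storing two f16 values in each i32 output element (four per vec4<i32> on the vectorized path), with optional pre-scaling and zero-padding of the last two dimensions to a multiple of the padding. A rough CPU emulation of the pack step, assuming truncating f32-to-f16 rounding; both helpers are illustrative and not part of the package:

```ts
// Hypothetical CPU emulation of the shader's pack2x16float: two f32 values
// are rounded to f16 and packed into one 32-bit integer (low half first).
function f32ToF16Bits(v: number): number {
  const f32 = new Float32Array(1);
  const u32 = new Uint32Array(f32.buffer);
  f32[0] = v;
  const x = u32[0];
  const sign = (x >>> 16) & 0x8000;
  const exp = (x >>> 23) & 0xff;
  let mant = x & 0x7fffff;
  if (exp === 0xff) return sign | 0x7c00 | (mant ? 0x200 : 0); // Inf / NaN
  const e = exp - 127 + 15;
  if (e >= 0x1f) return sign | 0x7c00;        // overflow -> Inf
  if (e <= 0) {                               // subnormal or zero
    if (e < -10) return sign;
    mant |= 0x800000;                         // restore implicit leading bit
    return sign | (mant >> (14 - e));
  }
  return sign | (e << 10) | (mant >> 13);     // normal (truncating round)
}

function pack2x16float(a: number, b: number): number {
  // `| 0` forces a signed 32-bit result, matching the kernel's i32 output.
  return (f32ToF16Bits(a) | (f32ToF16Bits(b) << 16)) | 0;
}
```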
package/dist/ops/webgpu/qkv.js
CHANGED
@@ -1,61 +1,26 @@
-import
-import {
-import {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    const t = this.outputShape[1], e = this.outputShape[3], o = t * e;
-    return `
-    ${h("index")} {
-      if (index < uniforms.size) {
-        let coords = getCoordsFromIndex(index); // [b, h, t, d]
-        let b = coords[0];
-        let h = coords[1];
-        let t = coords[2];
-        let d = coords[3];
-
-        // Compute output channel index in fused kernel
-        let out_offset = uniforms.mode * ${o} + h * ${e} + d;
-
-        var sum = 0.0;
-        let baseX = b * uniforms.xShape[1] * uniforms.xShape[2] + t * uniforms.xShape[2];
-        for (var c = 0; c < ${o}; c += 1) {
-          let xval = x[baseX + c];
-          let kval = getKernel(c, out_offset);
-          sum = fma(xval, kval, sum);
-        }
-
-        setOutputAtIndex(index, sum);
-      }
-    }
-  `;
-  }
-}
-function f(r) {
-  const { x: t, kernel: e } = r.inputs, { heads: o } = r.attrs, s = r.backend, n = t.shape[0], u = t.shape[1], a = t.shape[2];
-  if (m(e.shape, [a, 3 * a], "Error in QKV: "), a % o !== 0)
-    throw new Error(`Channel dimension ${a} must be divisible by number of heads ${o} in QKV.`);
-  const i = new l(n, o, u, a);
-  return [
-    s.runWebGPUProgram(i, [t, e], "float32", [{ type: "int32", data: [0] }]),
-    s.runWebGPUProgram(i, [t, e], "float32", [{ type: "int32", data: [1] }]),
-    s.runWebGPUProgram(i, [t, e], "float32", [{ type: "int32", data: [2] }])
+import "../../index-ZyQhjEPo.js";
+import { j as h } from "../../tensor-DdQUJZlz.js";
+import { b as f } from "../../matMul16--R5hOwDG.js";
+import { slice16 as a } from "../slice16.js";
+import { isPackedTensor as l } from "../../utilities/packed.js";
+import { r as u } from "../../tensor_util-DV-FP5Q3.js";
+function k(i) {
+  const { x: r, kernel: c } = i.inputs, { heads: e } = i.attrs, t = r.shape[0], n = r.shape[1], s = r.shape[2], m = l(r);
+  if (h(c.shape, [m ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
+    throw new Error(`Channel dimension ${s} must be divisible by number of heads ${e} in QKV.`);
+  const o = f(r, c, !1, !1, {
+    forceOutputShape: [t, n, 3 * e, s / e],
+    perm: [0, 2, 1, 3]
+  }), p = [
+    a(o, [0, 0, 0, 0], [t, e, n, s / e]),
+    a(o, [0, e, 0, 0], [t, e, n, s / e]),
+    a(o, [0, 2 * e, 0, 0], [t, e, n, s / e])
   ];
+  return o.dispose(), p;
 }
 const b = {
   kernelName: "QKV",
   backendName: "webgpu",
-  kernelFunc:
+  kernelFunc: k
 };
-
+u(b);
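The fused per-mode QKV program is gone: the new kernel runs a single matMul16 with a forced [batch, seq, 3*heads, headDim] output shape and a [0, 2, 1, 3] permutation, then slices out Q, K and V. An unfused sketch of the same computation in plain tfjs-core ops, assuming x: [B, T, C] and kernel: [C, 3C] with C divisible by heads, as checked above (the shipped path additionally handles fp16-packed inputs):

```ts
import * as tf from '@tensorflow/tfjs-core';

// Illustrative unfused equivalent of the new QKV kernel; not the package API.
function qkvReference(x: tf.Tensor3D, kernel: tf.Tensor2D, heads: number): tf.Tensor[] {
  const [B, T, C] = x.shape;
  const headDim = C / heads;
  return tf.tidy(() => {
    const proj = tf.matMul(x.reshape([B * T, C]), kernel);        // [B*T, 3C]
    const split = proj
      .reshape([B, T, 3 * heads, headDim])
      .transpose([0, 2, 1, 3]);                                   // [B, 3*heads, T, headDim]
    return [0, 1, 2].map((i) =>
      split.slice([0, i * heads, 0, 0], [B, heads, T, headDim])   // Q, K, V
    );
  });
}
```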
package/dist/ops/webgpu/rope.js
CHANGED
@@ -1,7 +1,10 @@
-import {
-import {
-import { f as
-
+import { isPackedTensor as w } from "../../utilities/packed.js";
+import { e as x } from "../../webgpu_program-Cigz-7RF.js";
+import { f as l, c as m } from "../../webgpu_util-BBCnKm2X.js";
+import "../../index-ZyQhjEPo.js";
+import { j as b } from "../../tensor-DdQUJZlz.js";
+import { r as v } from "../../tensor_util-DV-FP5Q3.js";
+class k {
   variableNames = ["x", "sin", "cos"];
   outputShape;
   shaderKey = "Rope";
@@ -10,13 +13,13 @@ class S {
   workgroupSize = [64, 1, 1];
   size = !0;
   uniforms = "pastLen: i32";
-  constructor(
-    this.shaderKey = `Rope_${
+  constructor(e, s, r, t) {
+    this.shaderKey = `Rope_${t}`, this.outputShape = [e, s, r, t], this.dispatchLayout = l(this.outputShape), this.dispatch = m(this.dispatchLayout, this.outputShape, this.workgroupSize);
   }
   getUserCode() {
-    const
+    const e = this.outputShape[3];
     return `
-    ${
+    ${x("index")} {
      if (index < uniforms.size) {
        let coords = getCoordsFromIndex(index); // [b, h, t, d]
        let b = coords[0];
@@ -24,7 +27,7 @@ class S {
        let t = coords[2];
        let d = coords[3];

-       let rotaryDim = ${
+       let rotaryDim = ${e};

        var outVal = 0.0;

@@ -61,22 +64,74 @@ class S {
   `;
   }
 }
-
-
-
+class L {
+  variableNames = ["x", "sin", "cos"];
+  outputShape;
+  shaderKey = "Rope";
+  dispatchLayout;
+  dispatch;
+  workgroupSize = [64, 1, 1];
+  size = !0;
+  uniforms = "pastLen: i32";
+  constructor(e, s, r, t) {
+    this.shaderKey = `Rope_${t}`, this.outputShape = [e, s, r, t / 2], this.dispatchLayout = l(this.outputShape), this.dispatch = m(this.dispatchLayout, this.outputShape, this.workgroupSize);
+  }
+  getUserCode() {
+    return `
+    ${x("index")} {
+      if (index < uniforms.size) {
+        let coords = getCoordsFromIndex(index); // [b, h, t, d]
+        let b = coords[0];
+        let h = coords[1];
+        let t = coords[2];
+        let d = coords[3];
+
+        var outVal = vec2<f32>(0.0, 0.0);
+
+        let xIdx = b * uniforms.outShapeStrides[0] +
+          h * uniforms.outShapeStrides[1] +
+          t * uniforms.outShapeStrides[2] +
+          d;
+
+        let idx = (t + uniforms.pastLen) * uniforms.cosShape[1] + d;
+        let cos = cos[idx];
+        let sin = sin[idx];
+
+        let xPair = unpack2x16float(u32(x[xIdx]));
+        let ownX = vec2<f32>(xPair.x * cos, xPair.y * cos);
+
+        let evenOdd = vec2<f32>(
+          -xPair.y,
+          xPair.x
+        );
+
+        outVal = vec2<f32>(
+          fma(evenOdd.x, sin, ownX.x),
+          fma(evenOdd.y, sin, ownX.y)
+        );
+
+        result[index] = i32(pack2x16float(outVal));
+      }
+    }
+  `;
+  }
+}
+function P(i) {
+  const { x: e } = i.inputs, { pastLen: s, negSin: r, ropeCache: t } = i.attrs, f = i.backend, a = w(e), p = e.shape[0], h = e.shape[1], n = e.shape[2], d = a ? e.shape[3] * 2 : e.shape[3], o = r ? t.getNegSin() : t.getSin(), u = t.getCos();
+  if (b(o.shape, u.shape, "Error in Rope: "), o.shape[0] < n + s)
     throw new Error(
-      `Sin tensor shape ${
+      `Sin tensor shape ${o.shape} is not compatible with seqLength ${n} and pastLen ${s}.`
     );
-  if (
-    throw new Error(`Sin tensor shape ${
-  if (
-    throw new Error(`Sin tensor must be 3-dimensional, but got shape ${
-  const
-  return
+  if (o.shape[1] * 2 < d)
+    throw new Error(`Sin tensor shape ${o.shape} is not compatible with feature dimension ${d}.`);
+  if (o.shape.length !== 3)
+    throw new Error(`Sin tensor must be 3-dimensional, but got shape ${o.shape}.`);
+  const S = a ? new L(p, h, n, d) : new k(p, h, n, d), g = [{ type: "int32", data: [s] }], y = a ? "int32" : e.dtype, c = f.runWebGPUProgram(S, [e, o, u], y, g);
+  return c.packed = a, c;
 }
-const
+const $ = {
   kernelName: "Rope",
   backendName: "webgpu",
-  kernelFunc:
+  kernelFunc: P
 };
-
+v($);
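The new packed variant of the Rope program halves the last dimension and rotates each (even, odd) f16 pair in place: ownX = x·cos, evenOdd = (−x.y, x.x), out = evenOdd·sin + ownX, i.e. the standard interleaved RoPE rotation. A scalar sketch of one row, assuming the sin/cos caches hold one angle per pair as the packed path's indexing suggests; names are illustrative, not the package API:

```ts
// Hypothetical scalar reference for the packed Rope kernel above.
function ropeRow(
  x: Float32Array,          // one [rotaryDim] row, even/odd interleaved
  sinCache: Float32Array,   // flattened [maxLen, rotaryDim / 2] caches (assumed)
  cosCache: Float32Array,
  t: number, pastLen: number, rotaryDim: number
): Float32Array {
  const out = new Float32Array(rotaryDim);
  const half = rotaryDim / 2;
  for (let d = 0; d < half; d++) {
    const idx = (t + pastLen) * half + d; // matches (t + uniforms.pastLen) * cosShape[1] + d
    const c = cosCache[idx], s = sinCache[idx];
    const xe = x[2 * d], xo = x[2 * d + 1];
    out[2 * d]     = xe * c - xo * s;     // fma(evenOdd.x, sin, ownX.x)
    out[2 * d + 1] = xo * c + xe * s;     // fma(evenOdd.y, sin, ownX.y)
  }
  return out;
}
```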
package/dist/ops/webgpu/scatterSub.js
CHANGED
@@ -1,6 +1,8 @@
-import {
-import { f as u, c as d } from "../../webgpu_util-
-import
+import { e as p } from "../../webgpu_program-Cigz-7RF.js";
+import { f as u, c as d } from "../../webgpu_util-BBCnKm2X.js";
+import "../../index-ZyQhjEPo.js";
+import { j as s } from "../../tensor-DdQUJZlz.js";
+import { r as h } from "../../tensor_util-DV-FP5Q3.js";
 class b {
   variableNames = ["labels", "softmaxProbs", "dy"];
   outputShape;
@@ -26,11 +28,11 @@ class b {
   `;
   }
 }
-function f(
-  const { logits: t, labels: e, dy:
-
+function f(o) {
+  const { logits: t, labels: e, dy: a } = o.inputs, c = o.backend, r = e.shape[0], i = t.shape[1];
+  s(a.shape, [r], "Error in EfficientScatterSub dy: "), s(t.shape, [r, i], "Error in EfficientScatterSub logits: "), s(e.shape, [r], "Error in EfficientScatterSub labels: ");
   const n = new b(r, i);
-  return c.runWebGPUProgram(n, [e, t,
+  return c.runWebGPUProgram(n, [e, t, a], "float32");
 }
 const l = {
   kernelName: "EfficientScatterSub",
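Only the imports and argument names change here; the shader body of EfficientScatterSub is untouched by this diff. Judging from its inputs (labels, softmaxProbs, dy) and the accompanying change to training/sparseCrossEntropy.js, the kernel appears to compute the sparse cross-entropy logits gradient; the following reference is an assumption about those semantics, not a reading of the shader:

```ts
// Assumed semantics of EfficientScatterSub, sketched from its input names:
// dLogits[b, c] = (softmaxProbs[b, c] - [c == labels[b]]) * dy[b]
function scatterSubReference(
  labels: Int32Array, probs: Float32Array, dy: Float32Array,
  batch: number, classes: number
): Float32Array {
  const out = new Float32Array(batch * classes);
  for (let b = 0; b < batch; b++) {
    for (let c = 0; c < classes; c++) {
      const oneHot = c === labels[b] ? 1 : 0;
      out[b * classes + c] = (probs[b * classes + c] - oneHot) * dy[b];
    }
  }
  return out;
}
```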
package/dist/ops/webgpu/slice16.d.ts
ADDED
@@ -0,0 +1,7 @@
+import { WebGPUBackend } from '@tensorflow/tfjs-backend-webgpu';
+import { SliceAttrs, SliceInputs, TensorInfo } from '@tensorflow/tfjs-core';
+export declare function slice(args: {
+    inputs: SliceInputs;
+    backend: WebGPUBackend;
+    attrs: SliceAttrs;
+}): TensorInfo;