@genai-fi/nanogpt 0.10.1 → 0.10.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.js +11761 -171
- package/dist/{RealDiv-DgA3z9oO.js → RealDiv-KAPDe8zB.js} +28 -30
- package/dist/Reshape-BYkmUnAv.js +14 -0
- package/dist/{Reshape-_kILl6tK.js → Reshape-Zt6eb7yh.js} +18 -20
- package/dist/TeachableLLM.js +10 -11
- package/dist/{axis_util-BvHEw88j.js → axis_util-BaG7mf5A.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-D-rUb2ty.js → backend_util-RCe-rHaj.js} +59 -60
- package/dist/{backend_webgpu-B0u2ndUn.js → backend_webgpu-DE3ACOLx.js} +45 -47
- package/dist/broadcast_to-B3eYlZm7.js +28 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +7 -11
- package/dist/checks/normRMS.js +9 -9
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +12 -13
- package/dist/checks/rope.js +2 -2
- package/dist/clip_by_value-BnO7-a88.js +12 -0
- package/dist/complex-DjxcVmoX.js +11 -0
- package/dist/concat-BV8bt5H-.js +17 -0
- package/dist/{concat_util-DcJk7YHS.js → concat_util-DpW8mL_l.js} +1 -1
- package/dist/{dataset-0xP8GjwI.js → dataset-BcwmTGYc.js} +137 -139
- package/dist/dropout-BcvN9JYi.js +92 -0
- package/dist/expand_dims-DT4tEPwA.js +11 -0
- package/dist/{exports_initializers-xuidcwI4.js → exports_initializers-Hta_rEnm.js} +1 -1
- package/dist/floor-D5QdR_le.js +9 -0
- package/dist/gather-D3JcZUaI.js +9 -0
- package/dist/{gelu-CNLFZWea.js → gelu-CjNPL4OH.js} +10 -11
- package/dist/{gpgpu_math-DDVJCn6-.js → gpgpu_math-DAOmgtXR.js} +841 -1015
- package/dist/{index-CjOj7j-u.js → index-BwexR4lA.js} +262 -263
- package/dist/index-DOvlwCh-.js +3520 -0
- package/dist/{kernel_funcs_utils-Dg_-E44D.js → kernel_funcs_utils-CCzYdUZg.js} +129 -131
- package/dist/layers/BaseLayer.js +14 -15
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +7 -7
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +9 -9
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +13 -14
- package/dist/log_sum_exp-ngO0-4pK.js +39 -0
- package/dist/main.js +49 -50
- package/dist/{matMul16--R5hOwDG.js → matMul16-BWRSOCWB.js} +14 -15
- package/dist/matMulGelu-CzfgT6Wq.js +163 -0
- package/dist/mat_mul-SjpJRLyL.js +11 -0
- package/dist/mod-AnXEvvpo.js +11 -0
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +13 -14
- package/dist/ones-D2rT0xk2.js +14 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +13 -14
- package/dist/ops/cpu/adamMoments.js +6 -7
- package/dist/ops/cpu/appendCache.js +7 -8
- package/dist/ops/cpu/attentionMask.js +7 -7
- package/dist/ops/cpu/fusedSoftmax.js +10 -11
- package/dist/ops/cpu/gatherSub.js +9 -10
- package/dist/ops/cpu/gelu.js +9 -10
- package/dist/ops/cpu/matMul16.js +6 -7
- package/dist/ops/cpu/matMulGelu.js +5 -6
- package/dist/ops/cpu/matMulMul.js +3 -4
- package/dist/ops/cpu/mulDropout.js +3 -4
- package/dist/ops/cpu/normRMS.js +10 -11
- package/dist/ops/cpu/qkv.js +8 -9
- package/dist/ops/cpu/rope.js +5 -6
- package/dist/ops/cpu/scatterSub.js +17 -19
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +11 -12
- package/dist/ops/grads/attentionMask.js +5 -6
- package/dist/ops/grads/gelu.js +3 -4
- package/dist/ops/grads/matMul16.js +4 -5
- package/dist/ops/grads/matMulGelu.js +9 -10
- package/dist/ops/grads/normRMS.js +7 -8
- package/dist/ops/grads/pack16.js +4 -5
- package/dist/ops/grads/qkv.js +17 -19
- package/dist/ops/grads/rope.js +3 -5
- package/dist/ops/grads/softmax16.js +3 -4
- package/dist/ops/grads/unpack16.js +3 -4
- package/dist/ops/grads/utils.d.ts +1 -0
- package/dist/ops/grads/utils.js +8 -4
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +3 -4
- package/dist/ops/qkv.js +4 -8
- package/dist/ops/reshape16.js +14 -16
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +3 -8
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +5 -8
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +23 -24
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -3
- package/dist/ops/webgl/adamMoments.js +1 -2
- package/dist/ops/webgl/appendCache.js +1 -2
- package/dist/ops/webgl/attentionMask.js +4 -5
- package/dist/ops/webgl/fusedSoftmax.js +4 -6
- package/dist/ops/webgl/gatherSub.js +6 -7
- package/dist/ops/webgl/gelu.js +2 -3
- package/dist/ops/webgl/log.js +11 -12
- package/dist/ops/webgl/matMul16.js +10 -11
- package/dist/ops/webgl/matMulGelu.js +7 -111
- package/dist/ops/webgl/matMulMul.js +9 -10
- package/dist/ops/webgl/mulDropout.js +8 -9
- package/dist/ops/webgl/normRMS.js +2 -3
- package/dist/ops/webgl/qkv.js +5 -6
- package/dist/ops/webgl/rope.js +7 -8
- package/dist/ops/webgl/scatterSub.js +5 -6
- package/dist/ops/webgpu/adamAdjust.js +10 -12
- package/dist/ops/webgpu/adamMoments.js +8 -10
- package/dist/ops/webgpu/add16.js +8 -9
- package/dist/ops/webgpu/appendCache.js +23 -25
- package/dist/ops/webgpu/attentionMask.js +8 -10
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +12 -14
- package/dist/ops/webgpu/gatherSub.js +11 -13
- package/dist/ops/webgpu/gelu.js +28 -29
- package/dist/ops/webgpu/matMul16.js +26 -28
- package/dist/ops/webgpu/matMul16_program.js +4 -5
- package/dist/ops/webgpu/mul16.js +9 -10
- package/dist/ops/webgpu/normRMS.js +15 -17
- package/dist/ops/webgpu/normRMSGrad.js +21 -28
- package/dist/ops/webgpu/pack16.js +12 -13
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +16 -18
- package/dist/ops/webgpu/rope.js +25 -27
- package/dist/ops/webgpu/scatterSub.js +7 -9
- package/dist/ops/webgpu/slice16.js +21 -23
- package/dist/ops/webgpu/softmax16.js +17 -19
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +7 -8
- package/dist/ops/webgpu/sub16.js +7 -8
- package/dist/ops/webgpu/sum16.js +18 -20
- package/dist/ops/webgpu/transpose16.js +19 -20
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
- package/dist/ops/webgpu/unpack16.js +3 -4
- package/dist/ops/webgpu/utils/binary_op.js +7 -8
- package/dist/ops/webgpu/utils/reductions.js +14 -22
- package/dist/ops-B5yanEdW.js +476 -0
- package/dist/pack16-nQ6JaLo-.js +39 -0
- package/dist/patches/webgpu_backend.js +19 -20
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +21 -22
- package/dist/{random_width-DY6Kk2Dl.js → random_width-or-CEftb.js} +2506 -2761
- package/dist/range-BklejeeW.js +10 -0
- package/dist/relu-CP0ZcxWO.js +9 -0
- package/dist/reshape-ByE68wS9.js +9 -0
- package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
- package/dist/rope-Ir4mTyD1.js +24 -0
- package/dist/{scatter_nd_util-5EL-8VAQ.js → scatter_nd_util-lvSiX8q4.js} +1 -1
- package/dist/selu_util-kbhpTdYD.js +44 -0
- package/dist/{shared-BRksrJb3.js → shared-DT1TkE6w.js} +1 -1
- package/dist/{shared-BuAXb4CI.js → shared-dntlHIDQ.js} +343 -345
- package/dist/slice-BfEGSH82.js +12 -0
- package/dist/{slice_util-DtEldBfK.js → slice_util-uTKwiEpW.js} +1 -1
- package/dist/{softmax-ZHVebtR1.js → softmax-CA5jFsLR.js} +4 -5
- package/dist/split-CVLc0w--.js +9 -0
- package/dist/squeeze-C7Z2srUo.js +10 -0
- package/dist/stack-Cf4n9h0N.js +11 -0
- package/dist/step-CINUs5QB.js +261 -0
- package/dist/sum-DWAtNGez.js +11 -0
- package/dist/tensor-DJoc7gJU.js +8 -0
- package/dist/tensor1d-D11P_7Dp.js +11 -0
- package/dist/{tensor2d-G4Ys2GxX.js → tensor2d-Bs9wZRc7.js} +6 -7
- package/dist/{tensor4d-B8roDgtc.js → tensor4d-BARPdTaS.js} +6 -7
- package/dist/{tfjs_backend-kNyO5L2d.js → tfjs_backend-y1cvNhLA.js} +244 -253
- package/dist/tile-mbfagpsB.js +11 -0
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.js +2 -2
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +5 -5
- package/dist/transpose-ClWiBS_b.js +36 -0
- package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +1 -4
- package/dist/utilities/packed.js +10 -711
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-Bhn5bHYv.js → variable-WawDEaAb.js} +1 -1
- package/dist/{webgpu_program-Cigz-7RF.js → webgpu_program-DuOXPQol.js} +178 -172
- package/dist/{webgpu_util-BBCnKm2X.js → webgpu_util-RxEF33Rj.js} +34 -35
- package/dist/zeros-KnWaWf-X.js +13 -0
- package/dist/zeros_like-DvE73F4e.js +721 -0
- package/package.json +4 -2
- package/dist/Reshape-CF6odzV4.js +0 -16
- package/dist/broadcast_to-CwF7XIeu.js +0 -30
- package/dist/complex-CSlYz-2T.js +0 -13
- package/dist/concat-BHlIJeyT.js +0 -19
- package/dist/dropout-C1pM3f11.js +0 -99
- package/dist/expand_dims-BPG4fwBP.js +0 -13
- package/dist/gather-DykLGqmW.js +0 -10
- package/dist/index-ZyQhjEPo.js +0 -2157
- package/dist/log_sum_exp-DWI-76TI.js +0 -41
- package/dist/mat_mul-DeAh4uTH.js +0 -12
- package/dist/mod-Gt1rMB4n.js +0 -12
- package/dist/mulmat_packed_gpu-BMFhLwta.js +0 -55
- package/dist/ones-CAMiP4I2.js +0 -15
- package/dist/ops-CNI3TwqM.js +0 -645
- package/dist/pack16-CFUqumar.js +0 -41
- package/dist/patches/PackedTensor.d.ts +0 -12
- package/dist/patches/PackedTensor.js +0 -11
- package/dist/patches/engine.d.ts +0 -261
- package/dist/patches/engine.js +0 -10
- package/dist/patches/tape.d.ts +0 -12
- package/dist/patches/tape.js +0 -5
- package/dist/range-BMS52eQi.js +0 -11
- package/dist/reciprocal-CTmshQ9J.js +0 -10
- package/dist/register_all_kernels-Bwu1PTuU.js +0 -12307
- package/dist/relu-yZ2-7WxU.js +0 -10
- package/dist/reshape-DevtBWtf.js +0 -10
- package/dist/rope-B5UUMsPi.js +0 -32
- package/dist/selu_util-D1w6yyTO.js +0 -303
- package/dist/sin-BGfy2HZo.js +0 -16
- package/dist/slice-D_gkkqZK.js +0 -13
- package/dist/split-DrfihRpZ.js +0 -10
- package/dist/squeeze-DZEpeblb.js +0 -11
- package/dist/stack-yOIAalTq.js +0 -13
- package/dist/sum-_fzj5ZTB.js +0 -12
- package/dist/tensor-DdQUJZlz.js +0 -909
- package/dist/tensor-f35l8Odg.js +0 -8
- package/dist/tensor1d-CeZuc-Rv.js +0 -12
- package/dist/tensor_util-DV-FP5Q3.js +0 -523
- package/dist/tile-BzyEiF-F.js +0 -13
- package/dist/transpose-DKELTqhe.js +0 -38
- package/dist/zeros-2gldETuK.js +0 -14
package/dist/ops/cpu/adamAdjust.js
CHANGED
@@ -1,19 +1,18 @@
-import {
-
-
-
-
-
-a[e - 1] = 1;
-
-
-
-i
+import { p as k, w as t, x as i, m as w, y as z } from "../../index-DOvlwCh-.js";
+function A(c) {
+const { moments: s, value: r } = c.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = c.attrs, e = s.shape.length, a = new Array(e).fill(0), n = s.shape.slice();
+n[e - 1] = 1;
+const o = a.slice();
+o[e - 1] = 1;
+const p = n.slice(), b = s.slice(a, n).squeeze([e - 1]), M = s.slice(o, p).squeeze([e - 1]), g = t(b, l), f = t(M, m);
+return i(
+w(t(g, i(z(f), u ?? 1e-8)), -d),
+r
 );
 }
-const
+const C = {
 kernelName: "AdamAdjust",
 backendName: "cpu",
-kernelFunc:
+kernelFunc: A
 };
-
+k(C);
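Read with the minified imports decoded (t looks like div, i like add, w like mul, z like sqrt), the new kernel slices the packed moments tensor into first and second moments, bias-corrects them with the beta1/beta2 attrs, and applies the usual Adam step. A minimal plain-JS sketch of that per-weight update, assuming this reading of the aliases; `adamAdjust` here is a hypothetical name, not the package's API:

```js
// Hypothetical scalar sketch: Adam's parameter adjustment given bias-corrected
// moments mHat and vHat: value + (-learningRate) * mHat / (sqrt(vHat) + eps).
function adamAdjust(value, mHat, vHat, learningRate, epsilon = 1e-8) {
  return value + -learningRate * (mHat / (Math.sqrt(vHat) + epsilon));
}

console.log(adamAdjust(0.5, 0.1, 0.04, 0.001)); // ≈ 0.4995
```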
package/dist/ops/cpu/adamMoments.js
CHANGED
@@ -1,12 +1,11 @@
-import "../../index-
-import {
-import { s as b } from "../../stack-yOIAalTq.js";
+import { p } from "../../index-DOvlwCh-.js";
+import { s as b } from "../../stack-Cf4n9h0N.js";
 function f(t) {
-const { moments: n, gradient:
+const { moments: n, gradient: c } = t.inputs, { beta1: o, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
 s[e - 1] = 1;
-const
-
-const
+const i = a.slice();
+i[e - 1] = 1;
+const r = s.slice(), l = n.slice(a, s).squeeze([e - 1]), u = n.slice(i, r).squeeze([e - 1]), M = l.mul(o).add(c.mul(1 - o)), d = u.mul(m).add(c.square().mul(1 - m));
 return b([M, d], -1);
 }
 const g = {
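The moment update itself is fully visible in the new code: `M = l.mul(o).add(c.mul(1 - o))` and `d = u.mul(m).add(c.square().mul(1 - m))` are the standard Adam exponential moving averages of the gradient and squared gradient, re-stacked into one packed tensor. A scalar sketch for reference (names are illustrative, not the package's API):

```js
// Adam moment update: m ← β1·m + (1 − β1)·g and v ← β2·v + (1 − β2)·g².
function adamMoments(m, v, g, beta1, beta2) {
  return {
    m: m * beta1 + g * (1 - beta1),
    v: v * beta2 + g * g * (1 - beta2),
  };
}

console.log(adamMoments(0, 0, 0.5, 0.9, 0.999)); // { m: 0.05, v: 0.00025 }
```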
package/dist/ops/cpu/appendCache.js
CHANGED
@@ -1,13 +1,12 @@
-import "../../index-
-import {
-import { c as h } from "../../concat-BHlIJeyT.js";
+import { p as d } from "../../index-DOvlwCh-.js";
+import { c as h } from "../../concat-BV8bt5H-.js";
 function u(p) {
-const { cache: n, item: s } = p.inputs, { maxSize:
-if (c + e <=
-const
-return
+const { cache: n, item: s } = p.inputs, { maxSize: i, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], a = n.shape[3], e = s.shape[2];
+if (c + e <= i) {
+const f = n.slice([0, 0, 0, 0], [t, o, c, a]), m = n.slice([0, 0, c + e, 0], [t, o, i - c - e, a]), r = e < e ? s.slice([0, 0, 0, 0], [t, o, e, a]) : s, k = h([f, r, m], 2);
+return f.dispose(), m.dispose(), r !== s && r.dispose(), k;
 }
-const l = n.slice([0, 0, e, 0], [t, o,
+const l = n.slice([0, 0, e, 0], [t, o, i - e, a]), C = h([l, s], 2);
 return l.dispose(), C;
 }
 const w = {
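Judging by the slices over dimension 2, this is the KV-cache append: while `pastLen + newLen` still fits in `maxSize` the item is spliced in at position `pastLen`; once the cache is full, the oldest `newLen` steps are dropped and the item is concatenated at the end. A plain-array sketch along the time axis only (the real kernel does the same with 4-D tensor slices; these names are illustrative):

```js
// 1-D time-axis model of the cache update in the diff above.
function appendCache(cache, item, maxSize, pastLen) {
  if (pastLen + item.length <= maxSize) {
    return [...cache.slice(0, pastLen), ...item, ...cache.slice(pastLen + item.length)];
  }
  return [...cache.slice(item.length), ...item]; // evict oldest, append newest
}

console.log(appendCache([1, 2, 3, 0], [9], 4, 3)); // [1, 2, 3, 9]
console.log(appendCache([1, 2, 3, 4], [9], 4, 4)); // [2, 3, 4, 9]
```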
package/dist/ops/cpu/attentionMask.js
CHANGED
@@ -1,11 +1,11 @@
-import {
-import {
-import {
-import {
-import {
-import { m as g } from "../../mat_mul-
+import { p as o, q as d, b as u } from "../../index-DOvlwCh-.js";
+import { l as N } from "../../ops-B5yanEdW.js";
+import { o as b } from "../../ones-D2rT0xk2.js";
+import { z as A } from "../../zeros-KnWaWf-X.js";
+import { w as I } from "../../resize_nearest_neighbor-B19mCEg2.js";
+import { m as g } from "../../mat_mul-SjpJRLyL.js";
 function a(n) {
-const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(
+const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(b([t, t]), -1, 0).cast("bool"), i = A([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = I(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
 return k.add(p);
 }
 const w = {
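The rebuilt kernel makes the mask explicit: `bandPart(ones([t, t]), -1, 0)` keeps the lower triangle, and `f = I(m, i, l)` reads as a select between a zeros tensor and a -Infinity fill, producing an additive causal mask that is added to the scaled Q·Kᵀ scores. A small sketch of that mask construction (plain JS, illustrative names):

```js
// Additive causal mask: 0 on and below the diagonal, -Infinity above it,
// so future positions get zero weight after softmax.
function causalMask(t) {
  return Array.from({ length: t }, (_, row) =>
    Array.from({ length: t }, (_, col) => (col <= row ? 0 : Number.NEGATIVE_INFINITY))
  );
}

console.log(causalMask(3));
// [[0, -Inf, -Inf], [0, 0, -Inf], [0, 0, 0]]
```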
package/dist/ops/cpu/fusedSoftmax.js
CHANGED
@@ -1,30 +1,29 @@
-import "../../index-
-import {
-
-
-
-if (!n)
+import { p as e } from "../../index-DOvlwCh-.js";
+import { s as m } from "../../softmax-CA5jFsLR.js";
+function n(t) {
+const { inputs: s, attrs: a } = t, { logits: o } = s, { dim: i, dropoutRate: r } = a;
+if (!o)
 throw new Error("Error in softmax: input logits is null");
-return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), m(
+return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), m(o, i);
 }
 const f = {
 kernelName: "FusedSoftmax",
 backendName: "cpu",
-kernelFunc:
+kernelFunc: n
 };
 e(f);
 const u = {
 kernelName: "FusedSoftmax",
 backendName: "tensorflow",
-kernelFunc:
+kernelFunc: n
 };
 e(u);
 const l = {
 kernelName: "FusedSoftmax",
 backendName: "webgpu",
-kernelFunc:
+kernelFunc: n
 };
 e(l);
 export {
-
+n as softmaxCPU
 };
package/dist/ops/cpu/gatherSub.js
CHANGED
@@ -1,15 +1,14 @@
-import { A as u, B as c, E as
-import {
-import {
-
-
-
-return m.runKernel(p, r);
+import { A as u, B as c, E as g, aj as p, p as h, c as m } from "../../index-DOvlwCh-.js";
+import { r as l } from "../../range-BklejeeW.js";
+import { s as N } from "../../stack-Cf4n9h0N.js";
+function f(e, t) {
+const n = c(t, "indices", "gatherND", "int32"), s = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
+return g.runKernel(p, s);
 }
-const b = /* @__PURE__ */ u({ gatherND_:
+const b = /* @__PURE__ */ u({ gatherND_: f });
 function d(e) {
-const { values: t, labels: n, logits:
-return
+const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = l(0, s, 1, "int32"), i = N([a, n], 1), o = b(r, i);
+return m(t, o);
 }
 const k = {
 kernelName: "EfficientGatherSub",
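`EfficientGatherSub` pairs each batch row with its label, gathers `logits[i][labels[i]]` via `gatherND`, and subtracts the result from `values`. A plain-JS sketch of the same indexing (illustrative names):

```js
// values[i] − logits[i][labels[i]] for each row i.
function gatherSub(values, labels, logits) {
  return values.map((v, i) => v - logits[i][labels[i]]);
}

console.log(gatherSub([1.0, 2.0], [2, 0], [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]));
// [0.7, 1.6]
```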
package/dist/ops/cpu/gelu.js
CHANGED
@@ -1,8 +1,7 @@
-import { t as d } from "../../index-
-import { r } from "../../tensor_util-DV-FP5Q3.js";
+import { p as t, t as d } from "../../index-DOvlwCh-.js";
 const o = 0.7978845608028654, c = 0.044715;
-function m(
-const { inputs: u } =
+function m(r) {
+const { inputs: u } = r, { x: n } = u, e = n;
 return d(() => {
 const l = e.pow(3), s = e.add(l.mul(c)).mul(o).tanh().add(1).mul(0.5);
 return e.mul(s);
@@ -13,15 +12,15 @@ const N = {
 backendName: "cpu",
 kernelFunc: m
 };
-
+t(N);
 const K = {
 kernelName: "Gelu",
 backendName: "tensorflow",
 kernelFunc: m
 };
-
-function i(
-const { dy: u, x: n } =
+t(K);
+function i(r) {
+const { dy: u, x: n } = r.inputs;
 return d(() => {
 const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), p = g.add(G);
 return u.mul(p);
@@ -32,10 +31,10 @@ const x = {
 backendName: "cpu",
 kernelFunc: i
 };
-
+t(x);
 const h = {
 kernelName: "GeluGrad",
 backendName: "tensorflow",
 kernelFunc: i
 };
-
+t(h);
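The constants `o = 0.7978845608028654` (≈ √(2/π)) and `c = 0.044715` identify this as the tanh approximation of GELU, 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))), with `i` computing its analytic derivative for the backward pass. A scalar sketch:

```js
// Tanh-approximation GELU, matching the constants in the kernel above.
function gelu(x) {
  const o = 0.7978845608028654, c = 0.044715;
  return x * 0.5 * (1 + Math.tanh(o * (x + c * x ** 3)));
}

console.log(gelu(1)); // ≈ 0.8412
```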
package/dist/ops/cpu/matMul16.js
CHANGED
@@ -1,16 +1,15 @@
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import "../../index-
-import {
-
-
-const { A: e, B: n } = r.inputs, { transposeA: o, transposeB: s } = r.attrs, a = !t(e), c = !t(n);
+import { p } from "../../index-DOvlwCh-.js";
+import { m as l } from "../../mat_mul-SjpJRLyL.js";
+function m(e) {
+const { A: n, B: r } = e.inputs, { transposeA: o, transposeB: s } = e.attrs, a = !t(n), c = !t(r);
 if (a && c)
-return
+return l(n, r, o, s);
 throw new Error("MatMul16 CPU kernel only supports packed tensors currently.");
 }
 const u = {
 kernelName: "MatMul16",
 backendName: "cpu",
-kernelFunc:
+kernelFunc: m
 };
 p(u);
package/dist/ops/cpu/matMulGelu.js
CHANGED
@@ -1,12 +1,11 @@
-import { t as m } from "../../index-
-import { g as
-import {
-import { m as k } from "../../mat_mul-DeAh4uTH.js";
+import { p as e, t as m } from "../../index-DOvlwCh-.js";
+import { g as M, d as i } from "../../gelu-CjNPL4OH.js";
+import { m as k } from "../../mat_mul-SjpJRLyL.js";
 function c(t) {
 const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
 return m(() => {
 const o = k(a, l);
-return
+return M(o);
 });
 }
 const G = {
@@ -30,7 +29,7 @@ e(p);
 function s(t) {
 const { dy: u, x: n, kernel: r } = t.inputs;
 return m(() => {
-const a = k(n, r), l =
+const a = k(n, r), l = i(u, a), o = l.matMul(r.transpose()), d = n.transpose().matMul(l);
 return [o, d];
 });
 }
package/dist/ops/cpu/matMulMul.js
CHANGED
@@ -1,8 +1,7 @@
-import { t as
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+import { p as e, t as i } from "../../index-DOvlwCh-.js";
 function n(t) {
-const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u,
-return
+const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
+return i(() => m.matMul(k, o, s).mul(M));
 }
 const p = {
 kernelName: "MatMulMul",
package/dist/ops/cpu/mulDropout.js
CHANGED
@@ -1,8 +1,7 @@
-import { m as
-import { r as e } from "../../tensor_util-DV-FP5Q3.js";
+import { p as e, m as t } from "../../index-DOvlwCh-.js";
 function n(o) {
-const { inputs: r } = o, { a: l, b:
-return console.warn("Using fallback mulDrop implementation without dropout."),
+const { inputs: r } = o, { a: l, b: u } = r;
+return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
 }
 const a = {
 kernelName: "MulDropout",
package/dist/ops/cpu/normRMS.js
CHANGED
@@ -1,29 +1,28 @@
-import { t as d } from "../../index-
-import { r as a } from "../../tensor_util-DV-FP5Q3.js";
+import { p as o, t as d } from "../../index-DOvlwCh-.js";
 function i(t) {
-const { inputs: e } = t, { x: n, gamma: s } = e, r = n,
+const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
 return d(() => {
 const u = r.square().mean(-1, !0).add(1e-8).rsqrt();
-return r.mul(u).mul(
+return r.mul(u).mul(a);
 });
 }
-const
+const f = {
 kernelName: "RMSNorm",
 backendName: "cpu",
 kernelFunc: i
 };
-
+o(f);
 const g = {
 kernelName: "RMSNorm",
 backendName: "tensorflow",
 kernelFunc: i
 };
-
+o(g);
 function N(t) {
 const { dy: e, x: n, gamma: s } = t.inputs;
 return d(() => {
-const r = n.shape[n.shape.length - 1],
-return [c.mul(
+const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), k = c.mul(n).sum(-1, !0).div(r);
+return [c.mul(m).sub(n.mul(k).mul(m).div(a.add(1e-8))), l];
 });
 }
 const S = {
@@ -31,10 +30,10 @@ const S = {
 backendName: "cpu",
 kernelFunc: N
 };
-
+o(S);
 const p = {
 kernelName: "RMSNormGrad",
 backendName: "tensorflow",
 kernelFunc: N
 };
-
+o(p);
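Both passes are visible here: the forward kernel is RMSNorm, y = x · rsqrt(mean(x²) + ε) · γ with ε = 1e-8 over the last axis, and the gradient kernel returns dx and dγ together. A vector sketch of the forward formula (illustrative names):

```js
// RMSNorm over a 1-D vector: y = x / sqrt(mean(x²) + 1e-8) * gamma.
function rmsNorm(x, gamma) {
  const ms = x.reduce((s, v) => s + v * v, 0) / x.length;
  const inv = 1 / Math.sqrt(ms + 1e-8);
  return x.map((v, i) => v * inv * gamma[i]);
}

console.log(rmsNorm([3, 4], [1, 1])); // ≈ [0.8485, 1.1314]
```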
package/dist/ops/cpu/qkv.js
CHANGED
@@ -1,7 +1,6 @@
-import "../../index-
-import { r as
-import {
-import { s as x } from "../../split-DrfihRpZ.js";
+import { p as q } from "../../index-DOvlwCh-.js";
+import { r as o } from "../../reshape-ByE68wS9.js";
+import { s as x } from "../../split-CVLc0w--.js";
 function v(p) {
 const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
 if (C)
@@ -10,18 +9,18 @@ function v(p) {
 a.dispose();
 const d = o(i, [s, e, 3 * t]);
 i.dispose();
-const [k,
+const [k, l, m] = x(d, 3, -1);
 d.dispose();
 const r = t / n, f = o(k, [s, e, n, r]);
 k.dispose();
 const w = f.transpose([0, 2, 1, 3]);
 f.dispose();
-const h = o(
-
+const h = o(l, [s, e, n, r]);
+l.dispose();
 const N = h.transpose([0, 2, 1, 3]);
 h.dispose();
-const u = o(
-
+const u = o(m, [s, e, n, r]);
+m.dispose();
 const T = u.transpose([0, 2, 1, 3]);
 return u.dispose(), [w, N, T];
 }
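The second hunk is pure shape plumbing: the fused [B, T, 3C] projection is split into q, k and v, each reshaped to [B, T, heads, C/heads] and transposed to [B, heads, T, C/heads], disposing intermediates as it goes. A shape-only sketch (illustrative names):

```js
// Shapes produced by the QKV split for each of q, k and v.
function qkvShapes(B, T, C, heads) {
  return {
    split: [B, T, C],                      // after split(d, 3, -1)
    reshaped: [B, T, heads, C / heads],    // after reshape
    transposed: [B, heads, T, C / heads],  // after transpose([0, 2, 1, 3])
  };
}

console.log(qkvShapes(1, 8, 64, 4).transposed); // [1, 4, 8, 16]
```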
package/dist/ops/cpu/rope.js
CHANGED
@@ -1,9 +1,8 @@
-import "../../index-
-import { r as
-import {
-import {
-import {
-import { c as T } from "../../concat-BHlIJeyT.js";
+import { p as I } from "../../index-DOvlwCh-.js";
+import { r as y } from "../../range-BklejeeW.js";
+import { g as F } from "../../gather-D3JcZUaI.js";
+import { s as E } from "../../stack-Cf4n9h0N.js";
+import { c as T } from "../../concat-BV8bt5H-.js";
 function U(c, r, p, e, n) {
 const t = e.shape[3], s = p;
 if (s > t) return e;
package/dist/ops/cpu/scatterSub.js
CHANGED
@@ -1,25 +1,23 @@
-import { A as f, B as
-import {
-import {
-import {
-import {
-
-
-
-h(s);
-const n = c(r, "indices", "scatterND", "int32"), t = c(e, "updates", "scatterND");
+import { A as f, C as g, B as r, E as l, ai as N, p as b, c as S, m as h } from "../../index-DOvlwCh-.js";
+import { v as D } from "../../scatter_nd_util-lvSiX8q4.js";
+import { r as k } from "../../range-BklejeeW.js";
+import { s as v } from "../../stack-Cf4n9h0N.js";
+import { o as E } from "../../ones-D2rT0xk2.js";
+function I(a, e, s) {
+g(s);
+const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
 D(t, n, s);
-const
-return
+const c = { indices: n, updates: t }, o = { shape: s };
+return l.runKernel(N, c, o);
 }
-const
-function
-const { logits: e, labels: s, dy: n } =
-return
+const C = /* @__PURE__ */ f({ scatterND_: I });
+function K(a) {
+const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = C(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
+return h(p, m);
 }
-const
+const L = {
 kernelName: "EfficientScatterSub",
 backendName: "cpu",
-kernelFunc:
+kernelFunc: K
 };
-
+b(L);
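`EfficientScatterSub` scatters ones at each (row, label) position into a [t, c] one-hot matrix, subtracts it from `logits`, and scales each row by `dy`. That is the shape of the sparse cross-entropy gradient, which suggests `logits` holds softmax probabilities by this point, though the diff alone does not confirm that. A plain-JS sketch (illustrative names):

```js
// (logits − oneHot(labels)) scaled row-wise by dy.
function scatterSub(logits, labels, dy) {
  return logits.map((row, i) =>
    row.map((v, j) => (v - (j === labels[i] ? 1 : 0)) * dy[i])
  );
}

console.log(scatterSub([[0.7, 0.3]], [0], [1])); // [[-0.3, 0.3]]
```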
package/dist/ops/dot16.js
CHANGED
@@ -1,8 +1,8 @@
-import { b as d } from "../matMul16
+import { b as d } from "../matMul16-BWRSOCWB.js";
 import { transpose16 as w } from "./transpose16.js";
 import { reshape16 as n } from "./reshape16.js";
 import { isPackedTensor as p } from "../utilities/packed.js";
-import { d as x } from "../tfjs_backend-
+import { d as x } from "../tfjs_backend-y1cvNhLA.js";
 function E(e, s, h = !1, c = !1) {
 if (!p(e) && !p(s))
 return x(e, s);
package/dist/ops/gatherSub.js
CHANGED
package/dist/ops/gelu.js
CHANGED
@@ -1,7 +1,7 @@
-import "../index-
+import "../index-DOvlwCh-.js";
 import "./cpu/gelu.js";
 import "./webgl/gelu.js";
-import { d as e, g as i } from "../gelu-
+import { d as e, g as i } from "../gelu-CjNPL4OH.js";
 export {
 e as dGelu,
 i as gelu
package/dist/ops/grads/add16.js
CHANGED
@@ -1,27 +1,26 @@
-import {
-import { sum16 as
-import { reshape16 as
-
-const m = {
+import { u as i, a3 as h, a4 as d } from "../../index-DOvlwCh-.js";
+import { sum16 as c } from "../sum16.js";
+import { reshape16 as p } from "../reshape16.js";
+const A = {
 kernelName: "Add16",
 inputsToSave: ["a", "b"],
-gradFunc: (s,
-const [t, a] =
+gradFunc: (s, u) => {
+const [t, a] = u, n = h(t.shape, a.shape);
 if (Array.isArray(s))
 throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
 return { a: () => {
 let e = s;
 const r = d(t.shape, n);
-r.length > 0 && (e =
-const o =
+r.length > 0 && (e = c(e, r));
+const o = p(e, t.shape);
 return e.dispose(), o;
 }, b: () => {
 let e = s;
 const r = d(a.shape, n);
-r.length > 0 && (e =
-const o =
+r.length > 0 && (e = c(e, r));
+const o = p(e, a.shape);
 return e.dispose(), o;
 } };
 }
 };
-
+i(A);
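The gradFunc follows the standard broadcasting rule: dy flows to each input unchanged, except that axes the input was broadcast along are summed away with `sum16` and the result is reshaped back to the input's shape. The helpers aliased as `h` and `d` behave like tfjs's `assertAndGetBroadcastShape` and `getReductionAxes`; that reading of the aliases is my inference. A sketch of the axis computation (illustrative name):

```js
// Axes to sum dy over so its shape collapses back to inShape.
function reduceAxesForBroadcast(inShape, outShape) {
  const pad = outShape.length - inShape.length; // left-pad inShape with 1s
  const axes = [];
  for (let i = 0; i < outShape.length; i++) {
    const inDim = i < pad ? 1 : inShape[i - pad];
    if (inDim === 1 && outShape[i] > 1) axes.push(i);
  }
  return axes;
}

console.log(reduceAxesForBroadcast([1, 4], [2, 3, 4])); // [0, 1]
```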
package/dist/ops/grads/attentionMask.js
CHANGED
@@ -1,7 +1,6 @@
-import "../../index-
-import { m as o } from "../../matMul16
-import { transpose16 as
-import { a as c } from "../../tensor_util-DV-FP5Q3.js";
+import { u as m } from "../../index-DOvlwCh-.js";
+import { m as o } from "../../matMul16-BWRSOCWB.js";
+import { transpose16 as c } from "../transpose16.js";
 const l = {
 kernelName: "AttentionMask",
 inputsToSave: ["q", "k"],
@@ -13,10 +12,10 @@ const l = {
 return {
 q: () => o(r, i, e),
 k: () => {
-const t = o(a, r, e, !0, !1), u =
+const t = o(a, r, e, !0, !1), u = c(t, [0, 1, 3, 2]);
 return t.dispose(), u;
 }
 };
 }
 };
-
+m(l);
package/dist/ops/grads/gelu.js
CHANGED
@@ -1,6 +1,5 @@
-import "../../index-
-import { a as
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../index-DOvlwCh-.js";
+import { a as e } from "../../gelu-CjNPL4OH.js";
 export {
-
+e as geluGradConfig
 };
package/dist/ops/grads/matMul16.js
CHANGED
@@ -1,9 +1,8 @@
-import "../../index-
-import { a
-import "../../gelu-
+import "../../index-DOvlwCh-.js";
+import { a } from "../../matMul16-BWRSOCWB.js";
+import "../../gelu-CjNPL4OH.js";
 import "../transpose16.js";
 import "../reshape16.js";
-import "../../tensor_util-DV-FP5Q3.js";
 export {
-
+a as matMul16GradConfig
 };
package/dist/ops/grads/matMulGelu.js
CHANGED
@@ -1,18 +1,17 @@
-import { e as
-
-
-return l().runKernel("MatMulGeluGrad", { dy: e, x: r, kernel: n });
+import { u as a, e as o } from "../../index-DOvlwCh-.js";
+function s(e, n, r) {
+return o().runKernel("MatMulGeluGrad", { dy: e, x: n, kernel: r });
 }
-const
+const d = {
 kernelName: "MatMulGelu",
 inputsToSave: ["x", "kernel"],
 outputsToSave: [],
-gradFunc: (e,
-const [
+gradFunc: (e, n) => {
+const [r, u] = n, [t, l] = s(e, r, u);
 return {
-x: () =>
-kernel: () =>
+x: () => t,
+kernel: () => l
 };
 }
 };
-
+a(d);
package/dist/ops/grads/normRMS.js
CHANGED
@@ -1,21 +1,20 @@
-import {
-
-
-return t().runKernel("RMSNormGrad", { dy: r, x: a, gamma: m });
+import { u as t, e as u } from "../../index-DOvlwCh-.js";
+function g(r, a, n) {
+return u().runKernel("RMSNormGrad", { dy: r, x: a, gamma: n });
 }
 const s = {
 kernelName: "RMSNorm",
 inputsToSave: ["x", "gamma"],
 outputsToSave: [],
 gradFunc: (r, a) => {
-const [
+const [n, e] = a, [m, o] = g(r, n, e);
 return {
-x: () =>
-gamma: () =>
+x: () => m,
+gamma: () => o
 };
 }
 };
-
+t(s);
 export {
 s as normRMSGradConfig
 };
package/dist/ops/grads/pack16.js
CHANGED
@@ -1,7 +1,6 @@
-import "../../index-
-import { b as
-import "../../slice-
-import "../../tensor_util-DV-FP5Q3.js";
+import "../../index-DOvlwCh-.js";
+import { b as i } from "../../pack16-nQ6JaLo-.js";
+import "../../slice-BfEGSH82.js";
 export {
-
+i as packGradConfig
 };