@genai-fi/nanogpt 0.10.3 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.d.ts +10 -5
- package/dist/Generator.js +1789 -1765
- package/dist/{RealDiv-KAPDe8zB.js → RealDiv-Ds-jvL09.js} +22 -22
- package/dist/{Reshape-BYkmUnAv.js → Reshape-Cd6e-Otn.js} +1 -1
- package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ct266DEk.js} +9 -9
- package/dist/TeachableLLM.d.ts +4 -3
- package/dist/TeachableLLM.js +14 -14
- package/dist/Trainer.d.ts +2 -2
- package/dist/Trainer.js +6 -6
- package/dist/{axis_util-BaG7mf5A.js → axis_util-DofAuy0p.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-RCe-rHaj.js → backend_util-C7NWHpv7.js} +7 -7
- package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-B0Vls736.js} +10 -10
- package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-DDaNMbX7.js} +2 -2
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +2 -2
- package/dist/checks/normRMS.js +4 -4
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +4 -4
- package/dist/checks/rope.js +2 -2
- package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-Dn5tzexi.js} +4 -4
- package/dist/complex-DClmWqJt.js +11 -0
- package/dist/{concat-BV8bt5H-.js → concat-C6X3AAlQ.js} +1 -1
- package/dist/{concat_util-DpW8mL_l.js → concat_util-CHsJFZJJ.js} +1 -1
- package/dist/{dataset-BcwmTGYc.js → dataset-DcjWqUVQ.js} +7 -7
- package/dist/{dropout-BcvN9JYi.js → dropout-OxuaJz6z.js} +11 -11
- package/dist/{expand_dims-DT4tEPwA.js → expand_dims-BzfJK2uc.js} +3 -3
- package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-eS9QJ6ut.js} +1 -1
- package/dist/{floor-D5QdR_le.js → floor-DIb-lN_u.js} +1 -1
- package/dist/gather-BcO5UQNJ.js +9 -0
- package/dist/{gelu-CjNPL4OH.js → gelu-DqTbCx5x.js} +1 -1
- package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-CJcbnKPC.js} +2 -2
- package/dist/{index-DOvlwCh-.js → index-D0RBWjq8.js} +52 -52
- package/dist/{index-BwexR4lA.js → index-Dj5TkmPY.js} +89 -89
- package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-CSaumNDs.js} +11 -11
- package/dist/layers/BaseLayer.js +2 -2
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +5 -5
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +4 -4
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +17 -17
- package/dist/log_sum_exp-VLZgbFAH.js +39 -0
- package/dist/main.d.ts +1 -1
- package/dist/main.js +9 -9
- package/dist/{matMul16-BWRSOCWB.js → matMul16-cDxwemKj.js} +7 -7
- package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-B2s_80-H.js} +18 -18
- package/dist/{mat_mul-SjpJRLyL.js → mat_mul-DxpNTCRz.js} +3 -3
- package/dist/{mod-AnXEvvpo.js → mod-PrOKlFxH.js} +1 -1
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +9 -9
- package/dist/{ones-D2rT0xk2.js → ones-BX_wEgzB.js} +3 -3
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +6 -6
- package/dist/ops/cpu/adamMoments.js +2 -2
- package/dist/ops/cpu/appendCache.js +5 -5
- package/dist/ops/cpu/attentionMask.js +10 -10
- package/dist/ops/cpu/fusedSoftmax.js +2 -2
- package/dist/ops/cpu/gatherSub.js +6 -6
- package/dist/ops/cpu/gelu.js +9 -9
- package/dist/ops/cpu/matMul16.js +2 -2
- package/dist/ops/cpu/matMulGelu.js +3 -3
- package/dist/ops/cpu/matMulMul.js +1 -1
- package/dist/ops/cpu/mulDropout.js +1 -1
- package/dist/ops/cpu/normRMS.js +3 -3
- package/dist/ops/cpu/qkv.js +3 -3
- package/dist/ops/cpu/rope.js +9 -9
- package/dist/ops/cpu/scatterSub.js +11 -11
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +4 -4
- package/dist/ops/grads/attentionMask.js +2 -2
- package/dist/ops/grads/gelu.js +2 -2
- package/dist/ops/grads/matMul16.js +3 -3
- package/dist/ops/grads/matMulGelu.js +3 -3
- package/dist/ops/grads/normRMS.js +7 -7
- package/dist/ops/grads/pack16.js +3 -3
- package/dist/ops/grads/qkv.js +6 -6
- package/dist/ops/grads/rope.js +2 -2
- package/dist/ops/grads/softmax16.js +1 -1
- package/dist/ops/grads/unpack16.js +2 -2
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +2 -2
- package/dist/ops/qkv.js +1 -1
- package/dist/ops/reshape16.js +6 -6
- package/dist/ops/rope.js +2 -2
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +1 -1
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +3 -3
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -2
- package/dist/ops/webgl/adamMoments.js +1 -1
- package/dist/ops/webgl/appendCache.js +1 -1
- package/dist/ops/webgl/attentionMask.js +4 -4
- package/dist/ops/webgl/fusedSoftmax.js +6 -6
- package/dist/ops/webgl/gatherSub.js +1 -1
- package/dist/ops/webgl/gelu.js +2 -2
- package/dist/ops/webgl/log.js +3 -3
- package/dist/ops/webgl/matMul16.js +11 -11
- package/dist/ops/webgl/matMulGelu.js +4 -4
- package/dist/ops/webgl/matMulMul.js +7 -7
- package/dist/ops/webgl/mulDropout.js +1 -1
- package/dist/ops/webgl/normRMS.js +7 -7
- package/dist/ops/webgl/qkv.js +1 -1
- package/dist/ops/webgl/rope.js +4 -4
- package/dist/ops/webgl/scatterSub.js +1 -1
- package/dist/ops/webgpu/adamAdjust.js +3 -3
- package/dist/ops/webgpu/adamMoments.js +3 -3
- package/dist/ops/webgpu/add16.js +1 -1
- package/dist/ops/webgpu/appendCache.js +3 -3
- package/dist/ops/webgpu/attentionMask.js +5 -5
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +5 -5
- package/dist/ops/webgpu/gatherSub.js +5 -5
- package/dist/ops/webgpu/gelu.js +3 -3
- package/dist/ops/webgpu/matMul16.js +18 -18
- package/dist/ops/webgpu/matMul16_program.js +2 -2
- package/dist/ops/webgpu/mul16.js +4 -4
- package/dist/ops/webgpu/normRMS.js +6 -6
- package/dist/ops/webgpu/normRMSGrad.js +4 -4
- package/dist/ops/webgpu/pack16.js +1 -1
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +6 -6
- package/dist/ops/webgpu/rope.js +3 -3
- package/dist/ops/webgpu/scatterSub.js +3 -3
- package/dist/ops/webgpu/slice16.js +4 -4
- package/dist/ops/webgpu/softmax16.js +2 -2
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +1 -1
- package/dist/ops/webgpu/sub16.js +4 -4
- package/dist/ops/webgpu/sum16.js +6 -6
- package/dist/ops/webgpu/transpose16.js +2 -2
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
- package/dist/ops/webgpu/unpack16.js +3 -3
- package/dist/ops/webgpu/utils/binary_op.js +3 -3
- package/dist/ops/webgpu/utils/reductions.js +4 -4
- package/dist/{ops-B5yanEdW.js → ops-FJapAPfm.js} +56 -56
- package/dist/{pack16-nQ6JaLo-.js → pack16-k4jq6aMX.js} +7 -7
- package/dist/patches/webgpu_backend.js +7 -7
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +8 -8
- package/dist/{random_width-or-CEftb.js → random_width-UGQn4OWb.js} +33 -33
- package/dist/range-CuGvVN2c.js +10 -0
- package/dist/{relu-CP0ZcxWO.js → relu-Cf80uA2p.js} +1 -1
- package/dist/{reshape-ByE68wS9.js → reshape-CkjKPPqB.js} +1 -1
- package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-DB8k9KN_.js} +43 -43
- package/dist/{rope-Ir4mTyD1.js → rope-BmZmp9uP.js} +1 -1
- package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-BY22Cc-C.js} +1 -1
- package/dist/{selu_util-kbhpTdYD.js → selu_util-BuLbmbrl.js} +5 -5
- package/dist/{shared-DT1TkE6w.js → shared-B7USJZgw.js} +1 -1
- package/dist/{shared-dntlHIDQ.js → shared-BQboIImQ.js} +86 -86
- package/dist/{slice-BfEGSH82.js → slice-Aqy7KbJh.js} +3 -3
- package/dist/{slice_util-uTKwiEpW.js → slice_util-D8CQRenR.js} +7 -7
- package/dist/{softmax-CA5jFsLR.js → softmax-faLoUZVT.js} +1 -1
- package/dist/{split-CVLc0w--.js → split-BNz5jcGc.js} +3 -3
- package/dist/{squeeze-C7Z2srUo.js → squeeze--YMgaAAf.js} +2 -2
- package/dist/{stack-Cf4n9h0N.js → stack-WJK22CFn.js} +1 -1
- package/dist/{step-CINUs5QB.js → step-dXR33iOg.js} +32 -32
- package/dist/sum-BdplSvq_.js +11 -0
- package/dist/tensor-BQqrDvpx.js +8 -0
- package/dist/tensor1d-LxP9asMm.js +11 -0
- package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN1sSfQO.js} +3 -3
- package/dist/{tensor4d-BARPdTaS.js → tensor4d-DVwr7pLF.js} +1 -1
- package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-Vi4JfLzT.js} +28 -28
- package/dist/{tile-mbfagpsB.js → tile-CvN_LyVr.js} +4 -4
- package/dist/tokeniser/BaseTokeniser.d.ts +27 -0
- package/dist/tokeniser/BaseTokeniser.js +94 -0
- package/dist/tokeniser/CharTokeniser.d.ts +4 -3
- package/dist/tokeniser/CharTokeniser.js +46 -32
- package/dist/tokeniser/bpe.d.ts +4 -3
- package/dist/tokeniser/bpe.js +60 -45
- package/dist/tokeniser/type.d.ts +11 -0
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.d.ts +2 -2
- package/dist/training/DatasetBuilder.js +32 -36
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.d.ts +3 -3
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/{transpose-ClWiBS_b.js → transpose-JawVKyZy.js} +5 -5
- package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-LAbmE9G4.js} +78 -78
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.js +1 -1
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-WawDEaAb.js → variable-DQ9yYgEU.js} +1 -1
- package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-CAE4RICo.js} +3 -3
- package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-BdovYhXr.js} +1 -1
- package/dist/{zeros-KnWaWf-X.js → zeros-DeiE2zTa.js} +2 -2
- package/dist/{zeros_like-DvE73F4e.js → zeros_like-BAz3iKru.js} +77 -77
- package/package.json +1 -1
- package/dist/complex-DjxcVmoX.js +0 -11
- package/dist/gather-D3JcZUaI.js +0 -9
- package/dist/log_sum_exp-ngO0-4pK.js +0 -39
- package/dist/range-BklejeeW.js +0 -10
- package/dist/sum-DWAtNGez.js +0 -11
- package/dist/tensor-DJoc7gJU.js +0 -8
- package/dist/tensor1d-D11P_7Dp.js +0 -11
package/dist/ops/webgl/fusedSoftmax.js
CHANGED
@@ -1,7 +1,7 @@
-import { m as b, s as I, r as k } from "../../RealDiv-KAPDe8zB.js";
-import { r as v } from "../../Reshape-Zt6eb7yh.js";
-import { … } from "../../index-DOvlwCh-.js";
-import { e as S } from "../../axis_util-BaG7mf5A.js";
+import { m as b, s as I, r as k } from "../../RealDiv-Ds-jvL09.js";
+import { r as v } from "../../Reshape-Ct266DEk.js";
+import { h as w, af as P } from "../../index-D0RBWjq8.js";
+import { e as S } from "../../axis_util-DofAuy0p.js";
 class T {
   variableNames = ["logits", "maxLogits"];
   outputShape;
@@ -60,11 +60,11 @@ function L(r) {
   o.disposeIntermediateTensorInfo(d);
   const p = I({ inputs: { x: s }, backend: o, attrs: { axis: i, keepDims: !1 } }), a = v({ inputs: { x: p }, backend: o, attrs: { shape: f } });
   if (n !== void 0 && n > 0) {
-    const …
+    const h = new C(e.shape), g = o.runWebGLProgram(h, [s, a], "float32", [
       [n],
       [c ?? Math.random() * 1e4]
     ]);
-    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), …
+    return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), g;
   }
   const x = k({ inputs: { a: s, b: a }, backend: o });
   return o.disposeIntermediateTensorInfo(s), o.disposeIntermediateTensorInfo(p), o.disposeIntermediateTensorInfo(a), x;
package/dist/ops/webgl/gelu.js
CHANGED
@@ -1,5 +1,5 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { u as s, C as i } from "../../kernel_funcs_utils-CCzYdUZg.js";
+import { h as a } from "../../index-D0RBWjq8.js";
+import { u as s, C as i } from "../../kernel_funcs_utils-CSaumNDs.js";
 const t = 0.7978845608028654, r = 0.044715, c = i + `
   float x3 = x * x * x;
   float inner = x + ${r} * x3;
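The constants in this shader are the standard tanh approximation of GELU: 0.7978845608028654 is sqrt(2/π) and 0.044715 is the cubic coefficient, giving gelu(x) ≈ 0.5·x·(1 + tanh(sqrt(2/π)·(x + 0.044715·x³))). A minimal reference sketch in TypeScript (a hypothetical helper, not part of the package API):

// Tanh-based GELU approximation using the same constants as the shader.
function geluApprox(x: number): number {
  const k = 0.7978845608028654; // sqrt(2 / pi)
  const c = 0.044715;
  return 0.5 * x * (1 + Math.tanh(k * (x + c * x * x * x)));
}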
package/dist/ops/webgl/log.js
CHANGED
@@ -1,6 +1,6 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { u as s, l as N } from "../../kernel_funcs_utils-CCzYdUZg.js";
-import { y as l } from "../../shared-DT1TkE6w.js";
+import { h as e, ao as r } from "../../index-D0RBWjq8.js";
+import { u as s, l as N } from "../../kernel_funcs_utils-CSaumNDs.js";
+import { y as l } from "../../shared-B7USJZgw.js";
 const a = N + `
   return x < 0.0 ? NAN : log(x);
 `, t = `
package/dist/ops/webgl/matMul16.js
CHANGED
@@ -1,27 +1,27 @@
 import { isPackedTensor as k } from "../../utilities/packed.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { h as g, m as M, b as m } from "../../index-D0RBWjq8.js";
 import { matMulMul as N } from "../matMulMul.js";
 import { matMulGelu as U } from "../matMulGelu.js";
-import { m as G } from "../../mat_mul-SjpJRLyL.js";
-import { r as … } from "../../reshape-ByE68wS9.js";
-import { t as … } from "../../transpose-ClWiBS_b.js";
-function P(…) {
-  const { A: r, B: o } = …
+import { m as G } from "../../mat_mul-DxpNTCRz.js";
+import { r as h } from "../../reshape-CkjKPPqB.js";
+import { t as w } from "../../transpose-JawVKyZy.js";
+function P(l) {
+  const { A: r, B: o } = l.inputs, { transposeA: p, transposeB: c, scale: u, activation: A, scaleA: d, scaleB: f, forceOutputShape: t, perm: n } = l.attrs, B = !k(r), v = !k(o);
   if (B && v) {
     const a = d !== void 0 ? M(r, m(d)) : r, i = f !== void 0 ? M(o, m(f)) : o;
     let e;
-    if (u !== void 0 ? e = N(a, i, m(u), …
+    if (u !== void 0 ? e = N(a, i, m(u), p, c) : A === "gelu" ? e = U(a, i) : e = G(a, i, p, c), n)
       if (t) {
-        const s = …
+        const s = h(e, t);
         e.dispose();
-        const b = …
+        const b = w(s, n);
         return s.dispose(), b;
       } else {
-        const s = …
+        const s = w(e, n);
         return e.dispose(), s;
       }
     else if (t) {
-      const s = …
+      const s = h(e, t);
       return e.dispose(), s;
     } else
       return e;
package/dist/ops/webgl/matMulGelu.js
CHANGED
@@ -1,7 +1,7 @@
-import "../../index-DOvlwCh-.js";
-import "../../Reshape-Zt6eb7yh.js";
-import { a as m, b as o, c as p } from "../../matMulGelu-CzfgT6Wq.js";
-import "../../mat_mul-SjpJRLyL.js";
+import "../../index-D0RBWjq8.js";
+import "../../Reshape-Ct266DEk.js";
+import { a as m, b as o, c as p } from "../../matMulGelu-B2s_80-H.js";
+import "../../mat_mul-DxpNTCRz.js";
 export {
   m as MATMUL_SHARED_DIM_THRESHOLD,
   o as batchMatMulGeluImpl,
package/dist/ops/webgl/matMulMul.js
CHANGED
@@ -1,9 +1,9 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { b as c } from "../../matMulGelu-CzfgT6Wq.js";
-const … = `
+import { h as u } from "../../index-D0RBWjq8.js";
+import { b as c } from "../../matMulGelu-B2s_80-H.js";
+const M = `
 return a * b;
 `;
-function M(r) {
+function p(r) {
   const { inputs: n, backend: a, attrs: o } = r, { x: t, kernel: e, y: l } = n, { transposeA: s, transposeB: i } = o;
   if (t === void 0 || e === void 0)
     throw new Error("BatchMatMul requires two input tensors.");
@@ -13,16 +13,16 @@ function M(r) {
     transposeA: s,
     transposeB: i,
     backend: a,
-    activationSnippet: …,
+    activationSnippet: M,
     multiplier: l
   });
 }
 const m = {
   kernelName: "MatMulMul",
   backendName: "webgl",
-  kernelFunc: M
+  kernelFunc: p
 };
 u(m);
 export {
-  …
+  p as batchMatMulKernel
 };
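Each of these op files ends with the same pattern: build a { kernelName, backendName, kernelFunc } config and pass it to a register function imported (minified, e.g. `u` or `p`) from the shared index chunk. Assuming that function is TensorFlow.js's registerKernel, the unminified shape would look like this sketch, with the kernel body a placeholder:

import { KernelConfig, registerKernel } from "@tensorflow/tfjs-core";

// Placeholder for the file's kernel function (`p` above, exported as
// batchMatMulKernel); the real one builds and runs a WebGL program.
const batchMatMulKernel: KernelConfig["kernelFunc"] = ({ inputs, backend, attrs }) => {
  throw new Error("sketch only");
};

registerKernel({
  kernelName: "MatMulMul", // custom op name used by this package
  backendName: "webgl",    // one config is registered per backend
  kernelFunc: batchMatMulKernel,
});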
package/dist/ops/webgl/normRMS.js
CHANGED
@@ -1,5 +1,5 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { s as x } from "../../sum-DWAtNGez.js";
+import { h as p, e as G } from "../../index-D0RBWjq8.js";
+import { s as x } from "../../sum-BdplSvq_.js";
 class y {
   variableNames = ["x", "meanSquare", "gamma"];
   outputShape;
@@ -28,7 +28,7 @@ const C = {
   backendName: "webgl",
   kernelFunc: v
 };
-…(C);
+p(C);
 class b {
   variableNames = ["x", "meanSquare", "dyGamma", "dyXMean"];
   outputShape;
@@ -73,14 +73,14 @@ function M(t) {
   l.dispose();
   const f = new b(n, m, u), S = r.runWebGLProgram(f, [e, d, s, i], "float32");
   s.dispose(), i.dispose();
-  const h = new N(n, m, u), …
+  const h = new N(n, m, u), g = r.runWebGLProgram(h, [e, d, a], "float32");
   d.dispose();
-  const q = x(G().makeTensorFromTensorInfo(…
-  return r.disposeIntermediateTensorInfo(…
+  const q = x(G().makeTensorFromTensorInfo(g), [0, 1]);
+  return r.disposeIntermediateTensorInfo(g), [S, q];
 }
 const k = {
   kernelName: "RMSNormGrad",
   backendName: "webgl",
   kernelFunc: M
 };
-…(k);
+p(k);
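For reference, the operation these RMSNorm programs implement (inputs "x", "meanSquare", "gamma") is y = x / sqrt(mean(x²)) · gamma. A scalar sketch; the epsilon term and its placement are an assumption, since they are not visible in this hunk:

// Reference RMSNorm over the last axis; eps placement is assumed.
function rmsNorm(x: number[], gamma: number[], eps = 1e-6): number[] {
  const meanSquare = x.reduce((s, v) => s + v * v, 0) / x.length;
  const inv = 1 / Math.sqrt(meanSquare + eps);
  return x.map((v, i) => v * inv * gamma[i]);
}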
package/dist/ops/webgl/qkv.js
CHANGED
package/dist/ops/webgl/rope.js
CHANGED
@@ -1,4 +1,4 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h as l } from "../../index-D0RBWjq8.js";
 class g {
   variableNames = ["x", "sin", "cos"];
   outputShape;
@@ -45,12 +45,12 @@ class g {
   }
 }
 function f(o) {
-  const { x: t } = o.inputs, { pastLen: s, ropeCache: e, negSin: n } = o.attrs, a = n ? e.getNegSin() : e.getSin(), r = e.getCos(), d = o.backend, i = t.shape[0], c = t.shape[1], p = t.shape[2], u = t.shape[3], …
-  return d.runWebGLProgram(…
+  const { x: t } = o.inputs, { pastLen: s, ropeCache: e, negSin: n } = o.attrs, a = n ? e.getNegSin() : e.getSin(), r = e.getCos(), d = o.backend, i = t.shape[0], c = t.shape[1], p = t.shape[2], u = t.shape[3], h = new g(i, c, p, u);
+  return d.runWebGLProgram(h, [t, a, r], "float32", [[s]]);
 }
 const m = {
   kernelName: "Rope",
   backendName: "webgl",
   kernelFunc: f
 };
-…(m);
+l(m);
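The Rope kernel consumes precomputed sin/cos tables (from RoPECache) plus a pastLen offset. As a reference, rotary position embedding rotates each feature pair by a position-dependent angle; a minimal sketch assuming the usual adjacent-pair layout, which this kernel's exact layout may differ from:

// Rotate feature pairs (x[2i], x[2i+1]) by the cached angles for one position.
function ropeRotate(x: number[], sin: number[], cos: number[]): number[] {
  const out = new Array<number>(x.length);
  for (let i = 0; i < x.length; i += 2) {
    const s = sin[i / 2], c = cos[i / 2];
    out[i] = x[i] * c - x[i + 1] * s;
    out[i + 1] = x[i] * s + x[i + 1] * c;
  }
  return out;
}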
package/dist/ops/webgpu/adamAdjust.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as p } from "../../webgpu_program-DuOXPQol.js";
-import { f as d, c as l } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as p } from "../../webgpu_program-CAE4RICo.js";
+import { f as d, c as l } from "../../webgpu_util-BdovYhXr.js";
+import { h as f, a7 as c } from "../../index-D0RBWjq8.js";
 class h {
   variableNames = ["moments", "value"];
   outputShape;
package/dist/ops/webgpu/adamMoments.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as u } from "../../webgpu_program-DuOXPQol.js";
-import { f as p, c as d } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as u } from "../../webgpu_program-CAE4RICo.js";
+import { f as p, c as d } from "../../webgpu_util-BdovYhXr.js";
+import { h as c, a7 as f } from "../../index-D0RBWjq8.js";
 class l {
   variableNames = ["moments", "gradient"];
   outputShape;
package/dist/ops/webgpu/add16.js
CHANGED
package/dist/ops/webgpu/appendCache.js
CHANGED
@@ -1,7 +1,7 @@
 import { isPackedTensor as T } from "../../utilities/packed.js";
-import { e as p } from "../../webgpu_program-DuOXPQol.js";
-import { f as d, c as u } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as p } from "../../webgpu_program-CAE4RICo.js";
+import { f as d, c as u } from "../../webgpu_util-BdovYhXr.js";
+import { h as S, a7 as g } from "../../index-D0RBWjq8.js";
 class x {
   variableNames = ["cache", "item"];
   outputShape;
package/dist/ops/webgpu/attentionMask.js
CHANGED
@@ -1,6 +1,6 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h as d, a7 as b } from "../../index-D0RBWjq8.js";
 import { isPackedTensor as p } from "../../utilities/packed.js";
-import { b as l } from "../../matMul16-BWRSOCWB.js";
+import { b as l } from "../../matMul16-cDxwemKj.js";
 import M from "./attentionMask32_program.js";
 function w(n) {
   const { q: t, k: e } = n.inputs, { divisor: a, pastLen: o } = n.attrs, m = n.backend;
@@ -11,12 +11,12 @@ function w(n) {
     throw new Error("Divisor must be non-zero in AttentionMask");
   if (o < 0)
     throw new Error("pastLen must be non-negative in AttentionMask");
-  const u = new M(s, i, k, r, c), …
+  const u = new M(s, i, k, r, c), h = [
     { type: "float32", data: [a] },
     { type: "int32", data: [o] },
     { type: "float32", data: [Number.NEGATIVE_INFINITY] }
-  ], …
-  return m.runWebGPUProgram(u, [t, e], …
+  ], f = t.dtype;
+  return m.runWebGPUProgram(u, [t, e], f, h);
 }
 const A = {
   kernelName: "AttentionMask",
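The uniforms pushed here (divisor, pastLen, negative infinity) describe the fused operation: scaled q·kᵀ scores with causal masking, where a key position beyond the query's absolute position (offset by pastLen) is set to −∞ before softmax. A scalar sketch of that semantics; the actual program works on GPU tiles:

// score(i, j) = dot(q_i, k_j) / divisor, masked so query i (at absolute
// position pastLen + i) cannot attend to keys at later positions.
function maskedScore(qk: number, divisor: number, pastLen: number, i: number, j: number): number {
  return j > pastLen + i ? Number.NEGATIVE_INFINITY : qk / divisor;
}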
package/dist/ops/webgpu/attentionMask32_program.js
CHANGED
@@ -1,5 +1,5 @@
-import { e as r } from "../../webgpu_program-DuOXPQol.js";
-import { f as a, c as u } from "../../webgpu_util-RxEF33Rj.js";
+import { e as r } from "../../webgpu_program-CAE4RICo.js";
+import { f as a, c as u } from "../../webgpu_util-BdovYhXr.js";
 class p {
   variableNames = ["q", "k"];
   outputShape;
package/dist/ops/webgpu/concat16.js
CHANGED
@@ -1,8 +1,8 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { e as D } from "../../webgpu_program-DuOXPQol.js";
-import { f as $, c as F } from "../../webgpu_util-RxEF33Rj.js";
-import { r as g } from "../../Reshape-BYkmUnAv.js";
-import { a as L, c as d } from "../../concat_util-DpW8mL_l.js";
+import { h as x, af as I, V as c } from "../../index-D0RBWjq8.js";
+import { e as D } from "../../webgpu_program-CAE4RICo.js";
+import { f as $, c as F } from "../../webgpu_util-BdovYhXr.js";
+import { r as g } from "../../Reshape-Cd6e-Otn.js";
+import { a as L, c as d } from "../../concat_util-CHsJFZJJ.js";
 class T {
   outputShape;
   shaderKey;
package/dist/ops/webgpu/gatherSub.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as u } from "../../webgpu_program-DuOXPQol.js";
-import { f as h, c as p } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as u } from "../../webgpu_program-CAE4RICo.js";
+import { f as h, c as p } from "../../webgpu_util-BdovYhXr.js";
+import { h as c, a7 as r } from "../../index-D0RBWjq8.js";
 class l {
   variableNames = ["labels", "logits", "values"];
   outputShape;
@@ -31,9 +31,9 @@ function d(e) {
   const n = new l(i);
   return o.runWebGPUProgram(n, [a, t, s], "float32");
 }
-const … = {
+const f = {
   kernelName: "EfficientGatherSub",
   backendName: "webgpu",
   kernelFunc: d
 };
-c(…);
+c(f);
package/dist/ops/webgpu/gelu.js
CHANGED
@@ -1,6 +1,6 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { e as s } from "../../webgpu_program-DuOXPQol.js";
-import { f as n, c as o } from "../../webgpu_util-RxEF33Rj.js";
+import { h as d } from "../../index-D0RBWjq8.js";
+import { e as s } from "../../webgpu_program-CAE4RICo.js";
+import { f as n, c as o } from "../../webgpu_util-BdovYhXr.js";
 import { isPackedTensor as l } from "../../utilities/packed.js";
 const u = 0.7978845608028654, r = 0.044715;
 class c {
package/dist/ops/webgpu/matMul16.js
CHANGED
@@ -1,25 +1,25 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h as H, m as P, b as B, V as y, $ as J } from "../../index-D0RBWjq8.js";
 import { isPackedTensor as R } from "../../utilities/packed.js";
 import { reshape16 as U } from "../reshape16.js";
-import { matMulMul as … } from "../matMulMul.js";
-import { matMulGelu as … } from "../matMulGelu.js";
-import … from "./matMul16_program.js";
-import { m as … } from "../../mat_mul-SjpJRLyL.js";
-import { r as x } from "../../reshape-ByE68wS9.js";
-import { t as C } from "../../transpose-ClWiBS_b.js";
-function $(p) {
+import { matMulMul as Q } from "../matMulMul.js";
+import { matMulGelu as X } from "../matMulGelu.js";
+import Y from "./matMul16_program.js";
+import { m as Z } from "../../mat_mul-DxpNTCRz.js";
+import { r as x } from "../../reshape-CkjKPPqB.js";
+import { t as C } from "../../transpose-JawVKyZy.js";
+function _(p) {
   const { A: e, B: s } = p.inputs, { transposeA: d, transposeB: f, scale: i, activation: k, scaleA: c, scaleB: u, forceOutputShape: o, perm: h, causalMask: g, pastLen: E } = p.attrs, F = p.backend, S = !R(e), M = !R(s);
   if (S && M) {
     const A = c !== void 0 ? P(e, B(c)) : e, b = u !== void 0 ? P(s, B(u)) : s;
     if (g)
       throw new Error("Causal mask is not supported for unpacked MatMul16.");
     let a;
-    if (i !== void 0 ? a = …
+    if (i !== void 0 ? a = Q(A, b, B(i), d, f) : k === "gelu" ? a = X(A, b) : a = Z(A, b, d, f), h)
       if (o) {
         const r = x(a, o);
         a.dispose();
-        const …
-        return r.dispose(), …
+        const q = C(r, h);
+        return r.dispose(), q;
       } else {
         const r = C(a, h);
         return a.dispose(), r;
@@ -34,23 +34,23 @@ function $(p) {
     throw new Error("When using mixed precision, A must be packed if B is packed.");
   if (!S && M)
     throw new Error("When using mixed precision, B must be packed if A is packed.");
-  const l = e.shape.length, m = s.shape.length, W = e.shape.slice(0, -2), z = s.shape.slice(0, -2), v = y(W), w = y(z), I = …
+  const l = e.shape.length, m = s.shape.length, W = e.shape.slice(0, -2), z = s.shape.slice(0, -2), v = y(W), w = y(z), I = J(e.shape.slice(0, -2), s.shape.slice(0, -2)), N = Math.max(v, w), K = e.shape[l - 2], L = s.shape[m - 2], T = e.shape[l - 1] * 2, V = s.shape[m - 1] * 2, D = U(e, [v, e.shape[l - 2], e.shape[l - 1]]), G = U(s, [w, s.shape[m - 2], s.shape[m - 1]]), t = new Y(N, K, L, T, V, d, f), n = [];
   i !== void 0 && (t.useScale(), n.push({ type: "float32", data: [i] })), c !== void 0 && (t.useScaleA(), n.push({ type: "float32", data: [c] })), u !== void 0 && (t.useScaleB(), n.push({ type: "float32", data: [u] })), k !== void 0 && t.useActivation(k), g && (t.useCausalMask(), n.push({ type: "int32", data: [E || 0] }));
   const O = t.outputShape.length;
   o && (p.attrs.originalShape = t.outputShape);
-  const …
-  t.setOutputShape(…
-  const …
+  const $ = o ?? I.concat([t.outputShape[O - 2], t.outputShape[O - 1]]);
+  t.setOutputShape($, h);
+  const j = F.runWebGPUProgram(
     t,
     [D, G],
     "packedF16",
     n.length > 0 ? n : void 0
   );
-  return D.dispose(), G.dispose(), …
+  return D.dispose(), G.dispose(), j;
 }
 const ee = {
   kernelName: "MatMul16",
   backendName: "webgpu",
-  kernelFunc: $
+  kernelFunc: _
 };
-…(ee);
+H(ee);
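The `* 2` on the inner dimensions (T = e.shape[l - 1] * 2) reflects the "packedF16" layout used throughout: two fp16 values are stored per 32-bit physical element, so a packed tensor's physical last dimension is half its logical one. A sketch of that shape bookkeeping, with a hypothetical helper name:

// Convert a packedF16 tensor's physical shape to its logical shape:
// each 32-bit element holds two half-precision values.
function logicalShape(physicalShape: number[]): number[] {
  const s = physicalShape.slice();
  s[s.length - 1] *= 2;
  return s;
}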
package/dist/ops/webgpu/matMul16_program.js
CHANGED
@@ -1,5 +1,5 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { e as h } from "../../webgpu_program-DuOXPQol.js";
+import { V as f } from "../../index-D0RBWjq8.js";
+import { e as h } from "../../webgpu_program-CAE4RICo.js";
 class B {
   variableNames = ["A", "B"];
   outputShape;
package/dist/ops/webgpu/mul16.js
CHANGED
@@ -1,8 +1,8 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { BinaryOpProgram as p } from "./utils/binary_op.js";
-import { B as … } from "../../binary_op_util-pKXltfxI.js";
+import { h as t } from "../../index-D0RBWjq8.js";
+import { BinaryOpProgram as m } from "./utils/binary_op.js";
+import { B as p } from "../../binary_op_util-pKXltfxI.js";
 function s(e) {
-  const { a: r, b: n } = e.inputs, o = e.backend, a = new p…
+  const { a: r, b: n } = e.inputs, o = e.backend, a = new m(p.MUL, r.shape, n.shape);
   return o.runWebGPUProgram(a, [r, n], "packedF16");
 }
 const c = {
package/dist/ops/webgpu/normRMS.js
CHANGED
@@ -1,12 +1,12 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h as g, a7 as l } from "../../index-D0RBWjq8.js";
 import { createReduceInfo as w, reduce as S } from "./utils/reductions.js";
 import { isPackedTensor as d } from "../../utilities/packed.js";
-import { p as f } from "../../pack16-nQ6JaLo-.js";
-import … from "./normRMS16_program.js";
-import … from "./normRMS32_program.js";
-import … from "./utils/deviceInfo.js";
+import { p as f } from "../../pack16-k4jq6aMX.js";
+import z from "./normRMS16_program.js";
+import N from "./normRMS32_program.js";
+import b from "./utils/deviceInfo.js";
 function P(c) {
-  const { x: e, gamma: s } = c.inputs, m = c.backend, i = …
+  const { x: e, gamma: s } = c.inputs, m = c.backend, i = b(m), t = d(e), a = d(s), n = t || a, r = !n || t ? e : f(e), p = !n || a ? s : f(s), h = [r, p], o = w(h, -1), u = n ? new z(i, o) : new N(i, o);
   if (l(p.shape, [r.shape[r.shape.length - 1]], "Error in RMSNorm: "), e.shape.length !== 3)
     throw new Error(`rmsNormGPU: input rank ${e.shape.length} not supported, only rank 3 is supported`);
   if (o.inSize !== r.shape[r.shape.length - 1])
package/dist/ops/webgpu/normRMSGrad.js
CHANGED
@@ -1,8 +1,8 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h as _, a7 as y, e as D } from "../../index-D0RBWjq8.js";
 import { createReduceInfo as X } from "./utils/reductions.js";
-import { f as $ } from "../../webgpu_util-RxEF33Rj.js";
-import { e as M } from "../../webgpu_program-DuOXPQol.js";
-import { p as k, u as R } from "../../pack16-nQ6JaLo-.js";
+import { f as $ } from "../../webgpu_util-BdovYhXr.js";
+import { e as M } from "../../webgpu_program-CAE4RICo.js";
+import { p as k, u as R } from "../../pack16-k4jq6aMX.js";
 import { isPackedTensor as h } from "../../utilities/packed.js";
 import { reshape16 as L } from "../reshape16.js";
 import { sum16 as P } from "../sum16.js";
package/dist/ops/webgpu/pack16_program.js
CHANGED
@@ -1,5 +1,5 @@
-import { f as o, c as a } from "../../webgpu_util-RxEF33Rj.js";
-import { e as s } from "../../webgpu_program-DuOXPQol.js";
+import { f as o, c as a } from "../../webgpu_util-BdovYhXr.js";
+import { e as s } from "../../webgpu_program-CAE4RICo.js";
 class h {
   outputShape;
   shaderKey = "Pack16";
package/dist/ops/webgpu/qkv.js
CHANGED
@@ -1,8 +1,8 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { b as f } from "../../matMul16-BWRSOCWB.js";
+import { h, a7 as l } from "../../index-D0RBWjq8.js";
+import { b as f } from "../../matMul16-cDxwemKj.js";
 import { slice16 as a } from "../slice16.js";
 import { isPackedTensor as u } from "../../utilities/packed.js";
-function b(i) {
+function k(i) {
   const { x: n, kernel: c } = i.inputs, { heads: e } = i.attrs, r = n.shape[0], t = n.shape[1], s = n.shape[2], p = u(n);
   if (l(c.shape, [p ? s * 2 : s, 3 * s], "Error in QKV: "), s % e !== 0)
     throw new Error(`Channel dimension ${s} must be divisible by number of heads ${e} in QKV.`);
@@ -16,9 +16,9 @@ function b(i) {
   ];
   return o.dispose(), m;
 }
-const … = {
+const b = {
   kernelName: "QKV",
   backendName: "webgpu",
-  kernelFunc: b
+  kernelFunc: k
 };
-h(…);
+h(b);
package/dist/ops/webgpu/rope.js
CHANGED
@@ -1,7 +1,7 @@
 import { isPackedTensor as y } from "../../utilities/packed.js";
-import { e as c } from "../../webgpu_program-DuOXPQol.js";
-import { f as x, c as l } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as c } from "../../webgpu_program-CAE4RICo.js";
+import { f as x, c as l } from "../../webgpu_util-BdovYhXr.js";
+import { h as w, a7 as b } from "../../index-D0RBWjq8.js";
 class v {
   variableNames = ["x", "sin", "cos"];
   outputShape;
package/dist/ops/webgpu/scatterSub.js
CHANGED
@@ -1,6 +1,6 @@
-import { e as p } from "../../webgpu_program-DuOXPQol.js";
-import { f as u, c as d } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
+import { e as p } from "../../webgpu_program-CAE4RICo.js";
+import { f as u, c as d } from "../../webgpu_util-BdovYhXr.js";
+import { h, a7 as o } from "../../index-D0RBWjq8.js";
 class b {
   variableNames = ["labels", "softmaxProbs", "dy"];
   outputShape;
package/dist/ops/webgpu/slice16.js
CHANGED
@@ -1,7 +1,7 @@
-import { b as u, c as m, e as l } from "../../webgpu_program-DuOXPQol.js";
-import { f, c as g } from "../../webgpu_util-RxEF33Rj.js";
-import { … } from "../../index-DOvlwCh-.js";
-import { p as y, a as $ } from "../../slice_util-uTKwiEpW.js";
+import { b as u, c as m, e as l } from "../../webgpu_program-CAE4RICo.js";
+import { f, c as g } from "../../webgpu_util-BdovYhXr.js";
+import { h as S, V as k } from "../../index-D0RBWjq8.js";
+import { p as y, a as $ } from "../../slice_util-D8CQRenR.js";
 function x(o) {
   switch (o) {
     case 1:
package/dist/ops/webgpu/softmax16.js
CHANGED
@@ -1,9 +1,9 @@
-import { … } from "../../index-DOvlwCh-.js";
+import { h, V as S, e as b } from "../../index-D0RBWjq8.js";
 import { reshape16 as d } from "../reshape16.js";
 import x from "./softmax16_program.js";
 import k from "./softmax16_subgroup_program.js";
 import l from "./utils/deviceInfo.js";
-import { r as z } from "../../reshape-ByE68wS9.js";
+import { r as z } from "../../reshape-CkjKPPqB.js";
 function F(a) {
   const { inputs: t, backend: o, attrs: p } = a, { logits: e } = t, { dim: r } = p, m = o.subgroupMinSize, i = o.subgroupMaxSize, c = l(o).subgroupsSupported, s = z(e, [
     S(e.shape) / e.shape[r],
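softmax16 flattens the logits to [outer, dim] and dispatches either a subgroup-based or a plain WebGPU program depending on device support. Both compute the usual numerically stable softmax along the last axis; a reference sketch:

// Numerically stable softmax: subtract the row max before exponentiating.
function softmaxRow(row: number[]): number[] {
  const max = Math.max(...row);
  const exps = row.map((v) => Math.exp(v - max));
  const sum = exps.reduce((a, b) => a + b, 0);
  return exps.map((e) => e / sum);
}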
package/dist/ops/webgpu/softmax16_program.js
CHANGED
@@ -1,5 +1,5 @@
-import { e } from "../../webgpu_program-DuOXPQol.js";
-import { f as o } from "../../webgpu_util-RxEF33Rj.js";
+import { e } from "../../webgpu_program-CAE4RICo.js";
+import { f as o } from "../../webgpu_util-BdovYhXr.js";
 class i {
   variableNames = ["logits"];
   outputShape;
package/dist/ops/webgpu/softmax16_subgroup_program.js
CHANGED
@@ -1,5 +1,5 @@
-import { e as o } from "../../webgpu_program-DuOXPQol.js";
-import { f as u } from "../../webgpu_util-RxEF33Rj.js";
+import { e as o } from "../../webgpu_program-CAE4RICo.js";
+import { f as u } from "../../webgpu_util-BdovYhXr.js";
 class i {
   variableNames = ["logits"];
   outputShape;
package/dist/ops/webgpu/softmax16grad.js
CHANGED
@@ -1,4 +1,4 @@
-import { p } from "../../index-DOvlwCh-.js";
+import { h as p } from "../../index-D0RBWjq8.js";
 import { createReduceInfo as m, reduce as l, ReduceProgram as i } from "./utils/reductions.js";
 import { isPackedTensor as n } from "../../utilities/packed.js";
 import k from "./utils/deviceInfo.js";
package/dist/ops/webgpu/sub16.js
CHANGED
@@ -1,8 +1,8 @@
-import { … } from "../../index-DOvlwCh-.js";
-import { BinaryOpProgram as p } from "./utils/binary_op.js";
-import { B as … } from "../../binary_op_util-pKXltfxI.js";
+import { h as t } from "../../index-D0RBWjq8.js";
+import { BinaryOpProgram as s } from "./utils/binary_op.js";
+import { B as p } from "../../binary_op_util-pKXltfxI.js";
 function c(e) {
-  const { a: r, b: n } = e.inputs, o = e.backend, a = new p…
+  const { a: r, b: n } = e.inputs, o = e.backend, a = new s(p.SUB, r.shape, n.shape);
   return o.runWebGPUProgram(a, [r, n], "packedF16");
 }
 const m = {