@genai-fi/nanogpt 0.10.3 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.d.ts +10 -5
- package/dist/Generator.js +1789 -1765
- package/dist/{RealDiv-KAPDe8zB.js → RealDiv-C8neBwFi.js} +15 -15
- package/dist/{Reshape-BYkmUnAv.js → Reshape-Bd4V_4X7.js} +1 -1
- package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ck29jQSY.js} +5 -5
- package/dist/TeachableLLM.d.ts +5 -3
- package/dist/TeachableLLM.js +14 -14
- package/dist/Trainer.d.ts +3 -1
- package/dist/Trainer.js +11 -8
- package/dist/{axis_util-BaG7mf5A.js → axis_util-DGqbT-FX.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-RCe-rHaj.js → backend_util-DC3rBo_H.js} +18 -18
- package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-mbhNnlx9.js} +3 -3
- package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +2 -2
- package/dist/checks/normRMS.js +4 -4
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +4 -4
- package/dist/checks/rope.js +2 -2
- package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-fg2aKzUy.js} +5 -5
- package/dist/complex-Cyg-eQeZ.js +11 -0
- package/dist/concat-CSm2rMwe.js +17 -0
- package/dist/{concat_util-DpW8mL_l.js → concat_util-D0je5Ppu.js} +1 -1
- package/dist/{dataset-BcwmTGYc.js → dataset-CVIJu7Xa.js} +7 -7
- package/dist/{dropout-BcvN9JYi.js → dropout-DLhSMNTZ.js} +9 -9
- package/dist/expand_dims-ChkuOp6I.js +11 -0
- package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-1KWPiStI.js} +1 -1
- package/dist/{floor-D5QdR_le.js → floor-BRMPgeIs.js} +1 -1
- package/dist/{gather-D3JcZUaI.js → gather-BSULDalH.js} +1 -1
- package/dist/{gelu-CjNPL4OH.js → gelu-BK1k-n1i.js} +1 -1
- package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-BJSTk_mW.js} +25 -25
- package/dist/{index-BwexR4lA.js → index-BBVLAXZD.js} +89 -89
- package/dist/{index-DOvlwCh-.js → index-Duu1Lvvv.js} +53 -53
- package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-BtYrPoJu.js} +6 -6
- package/dist/layers/BaseLayer.js +2 -2
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +5 -5
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +4 -4
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +9 -9
- package/dist/log_sum_exp-CVqLsVLl.js +39 -0
- package/dist/main.d.ts +10 -1
- package/dist/main.js +68 -58
- package/dist/{matMul16-BWRSOCWB.js → matMul16-xswmhSuF.js} +3 -3
- package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-BpvgnYG8.js} +14 -14
- package/dist/mat_mul-Bn2BDpT4.js +11 -0
- package/dist/{mod-AnXEvvpo.js → mod-B4AUd1Np.js} +1 -1
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +9 -9
- package/dist/{ones-D2rT0xk2.js → ones-CBI1AQjb.js} +3 -3
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +9 -9
- package/dist/ops/cpu/adamMoments.js +5 -5
- package/dist/ops/cpu/appendCache.js +6 -6
- package/dist/ops/cpu/attentionMask.js +10 -10
- package/dist/ops/cpu/fusedSoftmax.js +5 -5
- package/dist/ops/cpu/gatherSub.js +9 -9
- package/dist/ops/cpu/gelu.js +5 -5
- package/dist/ops/cpu/matMul16.js +2 -2
- package/dist/ops/cpu/matMulGelu.js +3 -3
- package/dist/ops/cpu/matMulMul.js +5 -5
- package/dist/ops/cpu/mulDropout.js +1 -1
- package/dist/ops/cpu/normRMS.js +7 -7
- package/dist/ops/cpu/qkv.js +3 -3
- package/dist/ops/cpu/rope.js +5 -5
- package/dist/ops/cpu/scatterSub.js +11 -11
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +4 -4
- package/dist/ops/grads/attentionMask.js +2 -2
- package/dist/ops/grads/gelu.js +2 -2
- package/dist/ops/grads/matMul16.js +3 -3
- package/dist/ops/grads/matMulGelu.js +6 -6
- package/dist/ops/grads/normRMS.js +4 -4
- package/dist/ops/grads/pack16.js +3 -3
- package/dist/ops/grads/qkv.js +10 -10
- package/dist/ops/grads/rope.js +2 -2
- package/dist/ops/grads/softmax16.js +1 -1
- package/dist/ops/grads/unpack16.js +2 -2
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +2 -2
- package/dist/ops/qkv.js +1 -1
- package/dist/ops/reshape16.js +2 -2
- package/dist/ops/rope.js +2 -2
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +1 -1
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +6 -6
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -2
- package/dist/ops/webgl/adamMoments.js +1 -1
- package/dist/ops/webgl/appendCache.js +1 -1
- package/dist/ops/webgl/attentionMask.js +1 -1
- package/dist/ops/webgl/fusedSoftmax.js +4 -4
- package/dist/ops/webgl/gatherSub.js +1 -1
- package/dist/ops/webgl/gelu.js +2 -2
- package/dist/ops/webgl/log.js +3 -3
- package/dist/ops/webgl/matMul16.js +8 -8
- package/dist/ops/webgl/matMulGelu.js +4 -4
- package/dist/ops/webgl/matMulMul.js +7 -7
- package/dist/ops/webgl/mulDropout.js +1 -1
- package/dist/ops/webgl/normRMS.js +7 -7
- package/dist/ops/webgl/qkv.js +1 -1
- package/dist/ops/webgl/rope.js +1 -1
- package/dist/ops/webgl/scatterSub.js +1 -1
- package/dist/ops/webgpu/adamAdjust.js +3 -3
- package/dist/ops/webgpu/adamMoments.js +5 -5
- package/dist/ops/webgpu/add16.js +1 -1
- package/dist/ops/webgpu/appendCache.js +3 -3
- package/dist/ops/webgpu/attentionMask.js +2 -2
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +5 -5
- package/dist/ops/webgpu/gatherSub.js +5 -5
- package/dist/ops/webgpu/gelu.js +3 -3
- package/dist/ops/webgpu/matMul16.js +19 -19
- package/dist/ops/webgpu/matMul16_program.js +2 -2
- package/dist/ops/webgpu/mul16.js +4 -4
- package/dist/ops/webgpu/normRMS.js +6 -6
- package/dist/ops/webgpu/normRMSGrad.js +4 -4
- package/dist/ops/webgpu/pack16.js +3 -3
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +8 -8
- package/dist/ops/webgpu/rope.js +3 -3
- package/dist/ops/webgpu/scatterSub.js +3 -3
- package/dist/ops/webgpu/slice16.js +4 -4
- package/dist/ops/webgpu/softmax16.js +4 -4
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +1 -1
- package/dist/ops/webgpu/sub16.js +4 -4
- package/dist/ops/webgpu/sum16.js +5 -5
- package/dist/ops/webgpu/transpose16.js +2 -2
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
- package/dist/ops/webgpu/unpack16.js +5 -5
- package/dist/ops/webgpu/utils/binary_op.js +3 -3
- package/dist/ops/webgpu/utils/reductions.js +4 -4
- package/dist/{ops-B5yanEdW.js → ops-C2_OXuZ4.js} +69 -69
- package/dist/{pack16-nQ6JaLo-.js → pack16-atD0eYRm.js} +9 -9
- package/dist/patches/webgpu_backend.js +6 -6
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +8 -8
- package/dist/{random_width-or-CEftb.js → random_width-BN4wGJaW.js} +33 -33
- package/dist/range-DKmP1-OQ.js +10 -0
- package/dist/relu-BsXmGzzu.js +9 -0
- package/dist/{reshape-ByE68wS9.js → reshape-BI0yzp1T.js} +1 -1
- package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-BA_BX-ub.js} +26 -26
- package/dist/{rope-Ir4mTyD1.js → rope-DJ7Y7c-u.js} +1 -1
- package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-k9MUVUkn.js} +1 -1
- package/dist/{selu_util-kbhpTdYD.js → selu_util-DyW0X1WG.js} +5 -5
- package/dist/{shared-DT1TkE6w.js → shared-Q3BS6T03.js} +1 -1
- package/dist/{shared-dntlHIDQ.js → shared-nnSWpC3u.js} +86 -86
- package/dist/{slice-BfEGSH82.js → slice-wBNvzVyz.js} +1 -1
- package/dist/{slice_util-uTKwiEpW.js → slice_util-zN8KFC5I.js} +1 -1
- package/dist/{softmax-CA5jFsLR.js → softmax-DfuYyjMh.js} +1 -1
- package/dist/split-BYrLboMq.js +9 -0
- package/dist/squeeze-Bk8Brcct.js +10 -0
- package/dist/{stack-Cf4n9h0N.js → stack-CDWShFHF.js} +1 -1
- package/dist/{step-CINUs5QB.js → step-BS5JXRR6.js} +23 -23
- package/dist/{sum-DWAtNGez.js → sum-BPUfDB2X.js} +3 -3
- package/dist/tensor-CEt9Nm2s.js +8 -0
- package/dist/tensor1d-Cc_KCIDg.js +11 -0
- package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN97fF71.js} +3 -3
- package/dist/{tensor4d-BARPdTaS.js → tensor4d-vuDDgdUI.js} +1 -1
- package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-806hyYve.js} +49 -49
- package/dist/{tile-mbfagpsB.js → tile-OWUvpIVt.js} +3 -3
- package/dist/tokeniser/BaseTokeniser.d.ts +25 -0
- package/dist/tokeniser/BaseTokeniser.js +94 -0
- package/dist/tokeniser/CharTokeniser.d.ts +10 -9
- package/dist/tokeniser/CharTokeniser.js +44 -30
- package/dist/tokeniser/bpe.d.ts +10 -9
- package/dist/tokeniser/bpe.js +67 -52
- package/dist/tokeniser/type.d.ts +14 -5
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.d.ts +3 -3
- package/dist/training/DatasetBuilder.js +34 -38
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.d.ts +4 -3
- package/dist/training/Trainer.js +22 -25
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/training/tasks/ConversationTask.d.ts +11 -0
- package/dist/training/tasks/ConversationTask.js +26 -0
- package/dist/training/tasks/PretrainingTask.d.ts +11 -0
- package/dist/training/tasks/PretrainingTask.js +34 -0
- package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
- package/dist/training/tasks/StartSentenceTask.js +42 -0
- package/dist/training/tasks/Task.d.ts +8 -0
- package/dist/training/tasks/Task.js +41 -0
- package/dist/{transpose-ClWiBS_b.js → transpose-BUkQCJp9.js} +6 -6
- package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-BljxHhCY.js} +5 -5
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.js +1 -1
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +1 -1
- package/dist/utilities/sentences.js +11 -11
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-WawDEaAb.js → variable-DPt_Iuog.js} +1 -1
- package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-BpWRlghH.js} +3 -3
- package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-DMiKzzQM.js} +7 -7
- package/dist/{zeros-KnWaWf-X.js → zeros-5YROwwUH.js} +2 -2
- package/dist/{zeros_like-DvE73F4e.js → zeros_like-De4n1C3m.js} +71 -71
- package/package.json +1 -1
- package/dist/complex-DjxcVmoX.js +0 -11
- package/dist/concat-BV8bt5H-.js +0 -17
- package/dist/expand_dims-DT4tEPwA.js +0 -11
- package/dist/log_sum_exp-ngO0-4pK.js +0 -39
- package/dist/mat_mul-SjpJRLyL.js +0 -11
- package/dist/range-BklejeeW.js +0 -10
- package/dist/relu-CP0ZcxWO.js +0 -9
- package/dist/split-CVLc0w--.js +0 -9
- package/dist/squeeze-C7Z2srUo.js +0 -10
- package/dist/tensor-DJoc7gJU.js +0 -8
- package/dist/tensor1d-D11P_7Dp.js +0 -11
@@ -1,11 +1,11 @@
-import { … } from "./index-DOvlwCh-.js";
-import { r as M } from "./Reshape-Zt6eb7yh.js";
-import { u as H } from "./gpgpu_math-DAOmgtXR.js";
-import { m as B } from "./mat_mul-SjpJRLyL.js";
+import { f as C, t as R, e as I, U as G, _ as L, x as U, ak as F } from "./index-Duu1Lvvv.js";
+import { r as M } from "./Reshape-Ck29jQSY.js";
+import { u as H } from "./gpgpu_math-BJSTk_mW.js";
+import { m as B } from "./mat_mul-Bn2BDpT4.js";
 class W {
   constructor(e, s, a, n = !1, o = !1, r = !1, i = null, u = !1, l = !1) {
     this.variableNames = ["matrixA", "matrixB"], this.packedInputs = !0, this.packedOutput = !0, this.outputShape = a, this.enableShapeUniforms = H(this.outputShape.length);
-    const p = n ? e[1] : e[2], h = Math.ceil(p / 2), d = n ? "i * 2, rc.y" : "rc.y, i * 2", …
+    const p = n ? e[1] : e[2], h = Math.ceil(p / 2), d = n ? "i * 2, rc.y" : "rc.y, i * 2", b = o ? "rc.z, i * 2" : "i * 2, rc.z", x = n ? ["a.xxyy", "a.zzww"] : ["a.xxzz", "a.yyww"], m = o ? ["b.xzxz", "b.ywyw"] : ["b.xyxy", "b.zwzw"];
     let c = "", g = "";
     i && (u ? c = `vec4 activation(vec4 a) {
       vec4 b = getPreluActivationWeightsAtOutCoords();
@@ -30,12 +30,12 @@ class W {
       int batchB = ${v};
       for (int i = 0; i < ${h}; i++) {
         vec4 a = getMatrixA(batchA, ${d});
-        vec4 b = getMatrixB(batchB, ${…
+        vec4 b = getMatrixB(batchB, ${b});

         // These swizzled products need to be separately added.
         // See: https://github.com/tensorflow/tfjs/issues/1735
-        result += (${…
-        result += (${…
+        result += (${x[0]} * ${m[0]});
+        result += (${x[1]} * ${m[1]});
       }
       return result;
     }
@@ -90,24 +90,24 @@ function O({
   activationSnippet: o,
   multiplier: r
 }) {
-  const i = t.shape.length, u = e.shape.length, l = s ? t.shape[i - 2] : t.shape[i - 1], p = a ? e.shape[u - 1] : e.shape[u - 2], h = s ? t.shape[i - 1] : t.shape[i - 2], d = a ? e.shape[u - 2] : e.shape[u - 1], …
-…
+  const i = t.shape.length, u = e.shape.length, l = s ? t.shape[i - 2] : t.shape[i - 1], p = a ? e.shape[u - 1] : e.shape[u - 2], h = s ? t.shape[i - 1] : t.shape[i - 2], d = a ? e.shape[u - 2] : e.shape[u - 1], b = t.shape.slice(0, -2), x = e.shape.slice(0, -2), m = G(b), c = G(x), $ = L(t.shape.slice(0, -2), e.shape.slice(0, -2)).concat([h, d]);
+  U(
     l === p,
     () => `Error in matMul: inner shapes (${l}) and (${p}) of Tensors with shapes ${t.shape} and ${e.shape} and transposeA=${s} and transposeB=${a} must match.`
   );
-  const f = s ? [m, l, h] : [m, h, l], v = a ? [c, d, p] : [c, p, d], A = M({ inputs: { x: t }, backend: n, attrs: { shape: f } }), y = M({ inputs: { x: e }, backend: n, attrs: { shape: v } }), k = [A, y], …
+  const f = s ? [m, l, h] : [m, h, l], v = a ? [c, d, p] : [c, p, d], A = M({ inputs: { x: t }, backend: n, attrs: { shape: f } }), y = M({ inputs: { x: e }, backend: n, attrs: { shape: v } }), k = [A, y], _ = Math.max(m, c), E = o, N = F(t.dtype, e.dtype), T = new W(
     f,
     v,
-    […
+    [_, h, d],
     s,
     a,
     !1,
-…
+    E,
     !!r,
     !1
   ), D = [A, y];
   r && D.push(r);
-  const z = n.runWebGLProgram(…
+  const z = n.runWebGLProgram(T, D, N), K = M({ inputs: { x: z }, backend: n, attrs: { shape: $ } });
   k.push(z);
   for (const P of k)
     n.disposeIntermediateTensorInfo(P);
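A note on the swizzle pairs introduced above (`a.xxzz`/`a.yyww` against `b.xyxy`/`b.zwzw`): each RGBA texel packs a 2x2 block of the matrix, and the two fused multiply-adds in the shader loop compute a 2x2 block product, per the tfjs issue linked in the diff. A scalar sketch of the same arithmetic (my convention: vec4 = [x, y, z, w] = [m00, m01, m10, m11]):

```js
// Sketch only: the 2x2 packed-texel product the shader loop above unrolls.
// a and b are vec4s holding 2x2 blocks [[x, y], [z, w]].
function texelBlockProduct(a, b) {
  const [ax, ay, az, aw] = a, [bx, by, bz, bw] = b;
  return [
    ax * bx + ay * bz, // row 0 · col 0  (lane 0 of a.xxzz*b.xyxy + a.yyww*b.zwzw)
    ax * by + ay * bw, // row 0 · col 1
    az * bx + aw * bz, // row 1 · col 0
    az * by + aw * bw, // row 1 · col 1
  ];
}
```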
package/dist/mat_mul-Bn2BDpT4.js ADDED
@@ -0,0 +1,11 @@
+import { o as m, q as s, B as c, E as M, D as p } from "./index-Duu1Lvvv.js";
+function f(e, o, n = !1, l = !1) {
+  let a = s(e, "a", "matMul"), t = s(o, "b", "matMul");
+  [a, t] = c(a, t);
+  const r = { a, b: t }, u = { transposeA: n, transposeB: l };
+  return M.runKernel(p, r, u);
+}
+const i = /* @__PURE__ */ m({ matMul_: f });
+export {
+  i as m
+};
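This new chunk (consumed throughout the diff as `mat_mul-Bn2BDpT4.js`) is the familiar tfjs matMul op factory split out by the bundler. Functionally it behaves like the public `tf.matMul`; a quick usage sketch:

```js
// Usage sketch: the chunk above is equivalent in behaviour to tf.matMul.
import * as tf from '@tensorflow/tfjs';

const a = tf.tensor2d([[1, 2], [3, 4]]);
const b = tf.tensor2d([[5, 6], [7, 8]]);
tf.matMul(a, b, /* transposeA */ false, /* transposeB */ false).print();
// [[19, 22], [43, 50]]
```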
package/dist/models/NanoGPTV1.js CHANGED
@@ -3,11 +3,11 @@ import b from "../layers/TransformerBlock.js";
 import k from "../layers/TiedEmbedding.js";
 import w from "../layers/RoPECache.js";
 import E from "../layers/RMSNorm.js";
-import { t as l, k as u } from "../index-DOvlwCh-.js";
+import { t as l, k as u } from "../index-Duu1Lvvv.js";
 import C from "./model.js";
 import P from "../layers/PositionEmbedding.js";
 import { packingSupported as _ } from "../utilities/packed.js";
-import { p as y, u as M } from "../pack16-nQ6JaLo-.js";
+import { p as y, u as M } from "../pack16-atD0eYRm.js";
 class I extends C {
   wte;
   // Token embeddings
package/dist/models/model.js CHANGED
@@ -1,23 +1,23 @@
 import m from "../layers/BaseLayer.js";
-import "../index-DOvlwCh-.js";
-import "../random_width-or-CEftb.js";
-import "../zeros_like-DvE73F4e.js";
+import "../index-Duu1Lvvv.js";
+import "../random_width-BN4wGJaW.js";
+import "../zeros_like-De4n1C3m.js";
 import "../Generator.js";
 import "../index-Cp39cXWe.js";
-import "../dataset-BcwmTGYc.js";
+import "../dataset-CVIJu7Xa.js";
 import "../ops/cpu/attentionMask.js";
 import "../ops/webgl/attentionMask.js";
 import "../ops/grads/attentionMask.js";
 import "../ops/cpu/rope.js";
 import "../ops/webgl/rope.js";
-import "../rope-Ir4mTyD1.js";
+import "../rope-DJ7Y7c-u.js";
 import "../ops/cpu/appendCache.js";
 import "../ops/webgl/appendCache.js";
 import "../ops/grads/softmax16.js";
-import "../matMul16-BWRSOCWB.js";
+import "../matMul16-xswmhSuF.js";
 import "../ops/webgl/matMul16.js";
 import "../ops/cpu/matMul16.js";
-import "../pack16-nQ6JaLo-.js";
+import "../pack16-atD0eYRm.js";
 import "../ops/transpose16.js";
 import "../ops/reshape16.js";
 import "../ops/cpu/qkv.js";
@@ -40,11 +40,11 @@ import "../ops/webgl/scatterSub.js";
 import "../ops/cpu/gatherSub.js";
 import "../ops/webgl/gatherSub.js";
 import "../ops/cpu/matMulGelu.js";
-import "../matMulGelu-CzfgT6Wq.js";
+import "../matMulGelu-BpvgnYG8.js";
 import "../ops/grads/matMulGelu.js";
 import "../ops/cpu/gelu.js";
 import "../ops/webgl/gelu.js";
-import "../gelu-CjNPL4OH.js";
+import "../gelu-BK1k-n1i.js";
 import "../ops/webgl/log.js";
 import "../checks/normRMS.js";
 import "../checks/normRMSGrad.js";
@@ -1,6 +1,6 @@
-import { … } from "./index-DOvlwCh-.js";
-import { c as f } from "./complex-DjxcVmoX.js";
-import { z as c } from "./zeros-KnWaWf-X.js";
+import { u as n, V as t, U as m, E as i } from "./index-Duu1Lvvv.js";
+import { c as f } from "./complex-Cyg-eQeZ.js";
+import { z as c } from "./zeros-5YROwwUH.js";
 function l(o, r = "float32") {
   if (n(o), r === "complex64") {
     const s = l(o, "float32"), a = c(o, "float32");
package/dist/ops/adamAdjust.js CHANGED
package/dist/ops/adamMoments.js CHANGED
package/dist/ops/add16.js CHANGED
package/dist/ops/appendCache.js CHANGED
@@ -1,9 +1,9 @@
-import { e as a } from "../index-DOvlwCh-.js";
+import { e as a } from "../index-Duu1Lvvv.js";
 import "./cpu/appendCache.js";
 import "./webgl/appendCache.js";
 import { isPackedTensor as c } from "../utilities/packed.js";
-import { c as t } from "../concat-BV8bt5H-.js";
-import { z as f } from "../zeros-KnWaWf-X.js";
+import { c as t } from "../concat-CSm2rMwe.js";
+import { z as f } from "../zeros-5YROwwUH.js";
 function C(r, o, n, p) {
   if (!p) {
     const e = r.shape[2], s = c(r);
package/dist/ops/concat16.js CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as o } from "../utilities/packed.js";
-import { e } from "../index-DOvlwCh-.js";
-import { c } from "../concat-BV8bt5H-.js";
+import { e } from "../index-Duu1Lvvv.js";
+import { c } from "../concat-CSm2rMwe.js";
 function p(r, n) {
   return o(r[0]) ? e().runKernel("Concat16", r, { axis: n ?? -1 }) : c(r, n);
 }
package/dist/ops/cpu/adamAdjust.js CHANGED
@@ -1,18 +1,18 @@
-import { …
-function …
-const { moments: …
-…
+import { f as k, j as t, l as i, m as z, n as A } from "../../index-Duu1Lvvv.js";
+function C(c) {
+  const { moments: n, value: r } = c.inputs, { beta1: l, beta2: m, epsilon: u, learningRate: d } = c.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
+  s[e - 1] = 1;
   const o = a.slice();
   o[e - 1] = 1;
-  const …
+  const b = s.slice(), p = n.slice(a, s).squeeze([e - 1]), M = n.slice(o, b).squeeze([e - 1]), f = t(p, l), g = t(M, m);
   return i(
-    …
+    z(t(f, i(A(g), u ?? 1e-8)), -d),
     r
   );
 }
-const …
+const h = {
   kernelName: "AdamAdjust",
   backendName: "cpu",
-  kernelFunc: …
+  kernelFunc: C
 };
-k(…
+k(h);
package/dist/ops/cpu/adamMoments.js CHANGED
@@ -1,16 +1,16 @@
-import { p } from "../../index-DOvlwCh-.js";
-import { s as …
-function …
+import { f as p } from "../../index-Duu1Lvvv.js";
+import { s as f } from "../../stack-CDWShFHF.js";
+function b(t) {
   const { moments: n, gradient: c } = t.inputs, { beta1: o, beta2: m } = t.attrs, e = n.shape.length, a = new Array(e).fill(0), s = n.shape.slice();
   s[e - 1] = 1;
   const i = a.slice();
   i[e - 1] = 1;
   const r = s.slice(), l = n.slice(a, s).squeeze([e - 1]), u = n.slice(i, r).squeeze([e - 1]), M = l.mul(o).add(c.mul(1 - o)), d = u.mul(m).add(c.square().mul(1 - m));
-  return …
+  return f([M, d], -1);
 }
 const g = {
   kernelName: "AdamMoments",
   backendName: "cpu",
-  kernelFunc: …
+  kernelFunc: b
 };
 p(g);
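Taken together, the two kernels above split Adam in half: AdamMoments updates the first and second moments (stacked into a single `moments` tensor along a trailing axis of size 2), and AdamAdjust turns them into the parameter delta. A minimal sketch of the combined step, using public tfjs tensor ops; the names and the bias-correction handling are mine (the kernels appear to fold any correction into the `beta1`/`beta2` attrs), not the package's:

```js
// Sketch only: the Adam step that AdamMoments / AdamAdjust above factor apart.
import * as tf from '@tensorflow/tfjs';

function adamStep(value, grad, m, v, { learningRate, beta1, beta2, epsilon = 1e-8 }) {
  const m2 = m.mul(beta1).add(grad.mul(1 - beta1));            // first moment  (AdamMoments)
  const v2 = v.mul(beta2).add(grad.square().mul(1 - beta2));   // second moment (AdamMoments)
  const delta = m2.div(v2.sqrt().add(epsilon)).mul(-learningRate); // AdamAdjust
  return { value: value.add(delta), m: m2, v: v2 };
}
```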
package/dist/ops/cpu/appendCache.js CHANGED
@@ -1,13 +1,13 @@
-import { …
-import { c as h } from "../../concat-BV8bt5H-.js";
+import { f as d } from "../../index-Duu1Lvvv.js";
+import { c as h } from "../../concat-CSm2rMwe.js";
 function u(p) {
   const { cache: n, item: s } = p.inputs, { maxSize: i, pastLen: c } = p.attrs, t = n.shape[0], o = n.shape[1], a = n.shape[3], e = s.shape[2];
   if (c + e <= i) {
-    const …
-    return …
+    const l = n.slice([0, 0, 0, 0], [t, o, c, a]), m = n.slice([0, 0, c + e, 0], [t, o, i - c - e, a]), r = e < e ? s.slice([0, 0, 0, 0], [t, o, e, a]) : s, k = h([l, r, m], 2);
+    return l.dispose(), m.dispose(), r !== s && r.dispose(), k;
   }
-  const …
-  return …
+  const f = n.slice([0, 0, e, 0], [t, o, i - e, a]), C = h([f, s], 2);
+  return f.dispose(), C;
 }
 const w = {
   kernelName: "AppendCache",
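The AppendCache kernel above maintains a fixed-size KV cache laid out as [batch, heads, seq, dim]: while there is room it splices the new keys/values in at `pastLen`, and once the cache is full it evicts the oldest positions before appending. A de-minified sketch (names are mine; assumes tfjs tensors):

```js
// Sketch only: the fixed-size KV-cache append implemented by the kernel above.
function appendCache(cache, item, maxSize, pastLen) {
  const [b, h, , d] = cache.shape, n = item.shape[2];
  if (pastLen + n <= maxSize) {
    // Room left: splice the new keys/values in at position pastLen.
    const head = cache.slice([0, 0, 0, 0], [b, h, pastLen, d]);
    const tail = cache.slice([0, 0, pastLen + n, 0], [b, h, maxSize - pastLen - n, d]);
    return head.concat([item, tail], 2);
  }
  // Cache full: evict the oldest n positions, then append the new item.
  const kept = cache.slice([0, 0, n, 0], [b, h, maxSize - n, d]);
  return kept.concat([item], 2);
}
```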
package/dist/ops/cpu/attentionMask.js CHANGED
@@ -1,22 +1,22 @@
-import { …
-import { l as N } from "../../ops-B5yanEdW.js";
-import { o as b } from "../../ones-D2rT0xk2.js";
-import { z as A } from "../../zeros-KnWaWf-X.js";
-import { w as I } from "../../resize_nearest_neighbor-B19mCEg2.js";
-import { m as g } from "../../mat_mul-SjpJRLyL.js";
+import { f as o, h as d, b as u } from "../../index-Duu1Lvvv.js";
+import { l as N } from "../../ops-C2_OXuZ4.js";
+import { o as b } from "../../ones-CBI1AQjb.js";
+import { z as A } from "../../zeros-5YROwwUH.js";
+import { w as I } from "../../resize_nearest_neighbor-BA_BX-ub.js";
+import { m as g } from "../../mat_mul-Bn2BDpT4.js";
 function a(n) {
   const { q: s, k: e } = n.inputs, { divisor: r } = n.attrs, c = s.shape[2], t = e.shape[2], m = N.bandPart(b([t, t]), -1, 0).cast("bool"), i = A([t, t]), l = d([t, t], Number.NEGATIVE_INFINITY), f = I(m, i, l), k = g(s, e, !1, !0).mul(u(r)), p = f.slice([0, 0], [c, t]).expandDims(0).expandDims(0);
   return k.add(p);
 }
-const …
+const h = {
   kernelName: "AttentionMask",
   backendName: "cpu",
   kernelFunc: a
 };
-o(…
-const …
+o(h);
+const w = {
   kernelName: "AttentionMask",
   backendName: "tensorflow",
   kernelFunc: a
 };
-o(…
+o(w);
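For reference, the AttentionMask kernel above builds a causal (lower-triangular) additive mask from `bandPart(ones)` and `where`, scales q·kᵀ by the `divisor` attr (a multiplicative scale, presumably 1/√d), and adds -inf above the diagonal. A sketch with public tfjs ops (names are mine):

```js
// Sketch only: causal attention-mask scoring as in the cpu kernel above.
import * as tf from '@tensorflow/tfjs';

function attentionMask(q, k, divisor) {
  const T = k.shape[2];                                        // key length
  const lower = tf.linalg.bandPart(tf.ones([T, T]), -1, 0).cast('bool');
  const neg = tf.fill([T, T], Number.NEGATIVE_INFINITY);
  const mask = tf.where(lower, tf.zeros([T, T]), neg);         // 0 below diag, -inf above
  const scores = tf.matMul(q, k, false, true).mul(tf.scalar(divisor));
  const Tq = q.shape[2];                                       // query length
  return scores.add(mask.slice([0, 0], [Tq, T]).expandDims(0).expandDims(0));
}
```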
package/dist/ops/cpu/fusedSoftmax.js CHANGED
@@ -1,17 +1,17 @@
-import { …
-import { s as …
+import { f as e } from "../../index-Duu1Lvvv.js";
+import { s as f } from "../../softmax-DfuYyjMh.js";
 function n(t) {
   const { inputs: s, attrs: a } = t, { logits: o } = s, { dim: i, dropoutRate: r } = a;
   if (!o)
     throw new Error("Error in softmax: input logits is null");
-  return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), …
+  return r !== void 0 && r > 0 && console.warn("Dropout in fusedSoftmax not implemented for CPU backend, skipping dropout."), f(o, i);
 }
-const …
+const m = {
   kernelName: "FusedSoftmax",
   backendName: "cpu",
   kernelFunc: n
 };
-e(…
+e(m);
 const u = {
   kernelName: "FusedSoftmax",
   backendName: "tensorflow",
package/dist/ops/cpu/gatherSub.js CHANGED
@@ -1,18 +1,18 @@
-import { …
-import { r as …
-import { s as …
-function …
+import { o as u, q as c, E as g, aj as h, f as m, c as p } from "../../index-Duu1Lvvv.js";
+import { r as f } from "../../range-DKmP1-OQ.js";
+import { s as l } from "../../stack-CDWShFHF.js";
+function N(e, t) {
   const n = c(t, "indices", "gatherND", "int32"), s = { params: c(e, "x", "gatherND", "string_or_numeric"), indices: n };
-  return g.runKernel(…
+  return g.runKernel(h, s);
 }
-const b = /* @__PURE__ */ u({ gatherND_: …
+const b = /* @__PURE__ */ u({ gatherND_: N });
 function d(e) {
-  const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = …
-  return …
+  const { values: t, labels: n, logits: r } = e.inputs, s = n.shape[0], a = f(0, s, 1, "int32"), o = l([a, n], 1), i = b(r, o);
+  return p(t, i);
 }
 const k = {
   kernelName: "EfficientGatherSub",
   backendName: "cpu",
   kernelFunc: d
 };
-…
+m(k);
package/dist/ops/cpu/gelu.js CHANGED
@@ -1,4 +1,4 @@
-import { …
+import { f as t, t as d } from "../../index-Duu1Lvvv.js";
 const o = 0.7978845608028654, c = 0.044715;
 function m(r) {
   const { inputs: u } = r, { x: n } = u, e = n;
@@ -7,12 +7,12 @@ function m(r) {
     return e.mul(s);
   });
 }
-const …
+const p = {
   kernelName: "Gelu",
   backendName: "cpu",
   kernelFunc: m
 };
-t(…
+t(p);
 const K = {
   kernelName: "Gelu",
   backendName: "tensorflow",
@@ -22,8 +22,8 @@ t(K);
 function i(r) {
   const { dy: u, x: n } = r.inputs;
   return d(() => {
-    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), …
-    return u.mul(…
+    const e = n.square(), l = e.mul(n), a = n.add(l.mul(c)).mul(o).tanh(), f = a.square().neg().add(1), k = e.mul(3 * c).add(1), g = a.add(1).mul(0.5), G = n.mul(f).mul(o).mul(k).mul(0.5), N = g.add(G);
+    return u.mul(N);
   });
 }
 const x = {
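In the Gelu kernel above, the constants `o = 0.7978845608028654` and `c = 0.044715` are √(2/π) and the cubic coefficient of the tanh GELU approximation. A de-minified sketch of the forward and backward passes the kernel computes (variable names are mine, not the package's):

```js
// Sketch only: GELU (tanh approximation) matching the cpu kernel above.
// gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
import * as tf from '@tensorflow/tfjs';

const SQRT_2_OVER_PI = 0.7978845608028654;
const CUBIC = 0.044715;

function gelu(x) {
  const inner = x.add(x.square().mul(x).mul(CUBIC)).mul(SQRT_2_OVER_PI);
  return x.mul(inner.tanh().add(1).mul(0.5));
}

// Gradient: dy * d/dx gelu(x), exactly the algebra in the minified grad kernel.
function dGelu(dy, x) {
  const x2 = x.square();
  const t = x.add(x2.mul(x).mul(CUBIC)).mul(SQRT_2_OVER_PI).tanh();
  const sech2 = t.square().neg().add(1);       // 1 - tanh^2
  const poly = x2.mul(3 * CUBIC).add(1);       // 1 + 3c*x^2
  const dgdx = t.add(1).mul(0.5)
    .add(x.mul(sech2).mul(SQRT_2_OVER_PI).mul(poly).mul(0.5));
  return dy.mul(dgdx);
}
```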
package/dist/ops/cpu/matMul16.js CHANGED
@@ -1,6 +1,6 @@
 import { isPackedTensor as t } from "../../utilities/packed.js";
-import { p } from "../../index-DOvlwCh-.js";
-import { m as l } from "../../mat_mul-SjpJRLyL.js";
+import { f as p } from "../../index-Duu1Lvvv.js";
+import { m as l } from "../../mat_mul-Bn2BDpT4.js";
 function m(e) {
   const { A: n, B: r } = e.inputs, { transposeA: o, transposeB: s } = e.attrs, a = !t(n), c = !t(r);
   if (a && c)
package/dist/ops/cpu/matMulGelu.js CHANGED
@@ -1,6 +1,6 @@
-import { …
-import { g as M, d as i } from "../../gelu-CjNPL4OH.js";
-import { m as k } from "../../mat_mul-SjpJRLyL.js";
+import { f as e, t as m } from "../../index-Duu1Lvvv.js";
+import { g as M, d as i } from "../../gelu-BK1k-n1i.js";
+import { m as k } from "../../mat_mul-Bn2BDpT4.js";
 function c(t) {
   const { inputs: u } = t, { x: n, kernel: r } = u, a = n, l = r;
   return m(() => {
package/dist/ops/cpu/matMulMul.js CHANGED
@@ -1,20 +1,20 @@
-import { …
+import { f as e, t as i } from "../../index-Duu1Lvvv.js";
 function n(t) {
   const { inputs: r, attrs: l } = t, { transposeA: o, transposeB: s } = l, { x: u, kernel: a, y: c } = r, m = u, k = a, M = c;
   return i(() => m.matMul(k, o, s).mul(M));
 }
-const …
+const f = {
   kernelName: "MatMulMul",
   backendName: "cpu",
   kernelFunc: n
 };
-e(…
-const …
+e(f);
+const p = {
   kernelName: "MatMulMul",
   backendName: "tensorflow",
   kernelFunc: n
 };
-e(…
+e(p);
 const g = {
   kernelName: "MatMulMul",
   backendName: "webgpu",
package/dist/ops/cpu/mulDropout.js CHANGED
@@ -1,4 +1,4 @@
-import { …
+import { f as e, m as t } from "../../index-Duu1Lvvv.js";
 function n(o) {
   const { inputs: r } = o, { a: l, b: u } = r;
   return console.warn("Using fallback mulDrop implementation without dropout."), t(l, u);
package/dist/ops/cpu/normRMS.js CHANGED
@@ -1,4 +1,4 @@
-import { …
+import { f as o, t as d } from "../../index-Duu1Lvvv.js";
 function i(t) {
   const { inputs: e } = t, { x: n, gamma: s } = e, r = n, a = s;
   return d(() => {
@@ -6,12 +6,12 @@ function i(t) {
     return r.mul(u).mul(a);
   });
 }
-const …
+const k = {
   kernelName: "RMSNorm",
   backendName: "cpu",
   kernelFunc: i
 };
-o(…
+o(k);
 const g = {
   kernelName: "RMSNorm",
   backendName: "tensorflow",
@@ -21,8 +21,8 @@ o(g);
 function N(t) {
   const { dy: e, x: n, gamma: s } = t.inputs;
   return d(() => {
-    const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), …
-    return [c.mul(m).sub(n.mul(…
+    const r = n.shape[n.shape.length - 1], a = n.square().mean(-1, !0), m = a.add(1e-8).rsqrt(), u = n.mul(m), l = e.mul(u).sum([0, 1]), c = e.mul(s), f = c.mul(n).sum(-1, !0).div(r);
+    return [c.mul(m).sub(n.mul(f).mul(m).div(a.add(1e-8))), l];
   });
 }
 const S = {
@@ -31,9 +31,9 @@ const S = {
   kernelFunc: N
 };
 o(S);
-const …
+const R = {
   kernelName: "RMSNormGrad",
   backendName: "tensorflow",
   kernelFunc: N
 };
-o(…
+o(R);
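The RMSNorm forward pass in the kernel above normalizes by the root-mean-square of the last axis rather than by mean and variance (the grad hunk reconstructs the matching backward pass). De-minified, the forward computation is:

```js
// Sketch only: RMSNorm forward pass as computed by the cpu kernel above.
// y = x * rsqrt(mean(x^2, lastAxis) + 1e-8) * gamma
import * as tf from '@tensorflow/tfjs';

function rmsNorm(x, gamma) {
  const meanSquare = x.square().mean(-1, true); // keepDims for broadcasting
  return x.mul(meanSquare.add(1e-8).rsqrt()).mul(gamma);
}
```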
package/dist/ops/cpu/qkv.js CHANGED
@@ -1,6 +1,6 @@
-import { …
-import { r as o } from "../../reshape-ByE68wS9.js";
-import { s as x } from "../../split-CVLc0w--.js";
+import { f as q } from "../../index-Duu1Lvvv.js";
+import { r as o } from "../../reshape-BI0yzp1T.js";
+import { s as x } from "../../split-BYrLboMq.js";
 function v(p) {
   const { x: c, kernel: K } = p.inputs, { heads: n, packed: C } = p.attrs;
   if (C)
package/dist/ops/cpu/rope.js CHANGED
@@ -1,8 +1,8 @@
-import { …
-import { r as y } from "../../range-BklejeeW.js";
-import { g as F } from "../../gather-D3JcZUaI.js";
-import { s as E } from "../../stack-Cf4n9h0N.js";
-import { c as T } from "../../concat-BV8bt5H-.js";
+import { f as I } from "../../index-Duu1Lvvv.js";
+import { r as y } from "../../range-DKmP1-OQ.js";
+import { g as F } from "../../gather-BSULDalH.js";
+import { s as E } from "../../stack-CDWShFHF.js";
+import { c as T } from "../../concat-CSm2rMwe.js";
 function U(c, r, p, e, n) {
   const t = e.shape[3], s = p;
   if (s > t) return e;
package/dist/ops/cpu/scatterSub.js CHANGED
@@ -1,8 +1,8 @@
-import { …
-import { v as D } from "../../scatter_nd_util-lvSiX8q4.js";
-import { r as k } from "../../range-BklejeeW.js";
-import { s as v } from "../../stack-Cf4n9h0N.js";
-import { o as E } from "../../ones-D2rT0xk2.js";
+import { o as f, u as g, q as r, E as l, ai as N, f as b, c as S, m as h } from "../../index-Duu1Lvvv.js";
+import { v as D } from "../../scatter_nd_util-k9MUVUkn.js";
+import { r as k } from "../../range-DKmP1-OQ.js";
+import { s as v } from "../../stack-CDWShFHF.js";
+import { o as E } from "../../ones-CBI1AQjb.js";
 function I(a, e, s) {
   g(s);
   const n = r(a, "indices", "scatterND", "int32"), t = r(e, "updates", "scatterND");
@@ -10,14 +10,14 @@ function I(a, e, s) {
   const c = { indices: n, updates: t }, o = { shape: s };
   return l.runKernel(N, c, o);
 }
-const …
-function …
-const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = …
+const K = /* @__PURE__ */ f({ scatterND_: I });
+function L(a) {
+  const { logits: e, labels: s, dy: n } = a.inputs, t = s.shape[0], c = e.shape[1], o = k(0, t, 1, "int32"), i = v([o, s], 1), d = E([t]), u = K(i, d, [t, c]), p = S(e, u), m = n.reshape([t, 1]);
   return h(p, m);
 }
-const …
+const T = {
   kernelName: "EfficientScatterSub",
   backendName: "cpu",
-  kernelFunc: …
+  kernelFunc: L
 };
-b(…
+b(T);
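The EfficientGatherSub / EfficientScatterSub pair above implement sparse softmax cross-entropy without materializing one-hot labels: the forward kernel gathers `logits[i, labels[i]]` via gatherND, and the backward kernel builds the one-hot with a scatterND of ones. A sketch with public tfjs ops (names are mine):

```js
// Sketch only: the gather/scatter trick behind the two kernels above.
import * as tf from '@tensorflow/tfjs';

// Forward: values - logits[i, labels[i]], no one-hot tensor needed.
function gatherSub(values, labels, logits) {
  const n = labels.shape[0];
  const rows = tf.range(0, n, 1, 'int32');
  const idx = tf.stack([rows, labels], 1);          // [[0, l0], [1, l1], ...]
  return tf.sub(values, tf.gatherND(logits, idx));
}

// Backward: (logits - oneHot(labels)) * dy, one-hot built by scatterND.
function scatterSubGrad(logits, labels, dy) {
  const n = labels.shape[0], c = logits.shape[1];
  const rows = tf.range(0, n, 1, 'int32');
  const idx = tf.stack([rows, labels], 1);
  const oneHot = tf.scatterND(idx, tf.ones([n]), [n, c]);
  return tf.sub(logits, oneHot).mul(dy.reshape([n, 1]));
}
```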
package/dist/ops/dot16.js CHANGED
@@ -1,8 +1,8 @@
-import { b as d } from "../matMul16-BWRSOCWB.js";
+import { b as d } from "../matMul16-xswmhSuF.js";
 import { transpose16 as w } from "./transpose16.js";
 import { reshape16 as n } from "./reshape16.js";
 import { isPackedTensor as p } from "../utilities/packed.js";
-import { d as x } from "../tfjs_backend-y1cvNhLA.js";
+import { d as x } from "../tfjs_backend-806hyYve.js";
 function E(e, s, h = !1, c = !1) {
   if (!p(e) && !p(s))
     return x(e, s);
package/dist/ops/gatherSub.js CHANGED
package/dist/ops/gelu.js CHANGED
@@ -1,7 +1,7 @@
-import "../index-DOvlwCh-.js";
+import "../index-Duu1Lvvv.js";
 import "./cpu/gelu.js";
 import "./webgl/gelu.js";
-import { d as e, g as i } from "../gelu-CjNPL4OH.js";
+import { d as e, g as i } from "../gelu-BK1k-n1i.js";
 export {
   e as dGelu,
   i as gelu
package/dist/ops/grads/add16.js CHANGED
@@ -1,11 +1,11 @@
-import { …
+import { i as u, _ as h, $ as d } from "../../index-Duu1Lvvv.js";
 import { sum16 as c } from "../sum16.js";
 import { reshape16 as p } from "../reshape16.js";
 const A = {
   kernelName: "Add16",
   inputsToSave: ["a", "b"],
-  gradFunc: (s, …
-  const [t, a] = …
+  gradFunc: (s, i) => {
+    const [t, a] = i, n = h(t.shape, a.shape);
     if (Array.isArray(s))
       throw new Error("Add16 gradFunc expected dy to be a Tensor but got an array");
     return { a: () => {
@@ -23,4 +23,4 @@ const A = {
     } };
   }
 };
-…
+u(A);
package/dist/ops/grads/attentionMask.js CHANGED
@@ -1,5 +1,5 @@
-import { …
-import { m as o } from "../../matMul16-BWRSOCWB.js";
+import { i as m } from "../../index-Duu1Lvvv.js";
+import { m as o } from "../../matMul16-xswmhSuF.js";
 import { transpose16 as c } from "../transpose16.js";
 const l = {
   kernelName: "AttentionMask",