@genai-fi/nanogpt 0.10.1 → 0.10.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Generator.js +11761 -171
- package/dist/{RealDiv-DgA3z9oO.js → RealDiv-KAPDe8zB.js} +28 -30
- package/dist/Reshape-BYkmUnAv.js +14 -0
- package/dist/{Reshape-_kILl6tK.js → Reshape-Zt6eb7yh.js} +18 -20
- package/dist/TeachableLLM.js +10 -11
- package/dist/{axis_util-BvHEw88j.js → axis_util-BaG7mf5A.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-D-rUb2ty.js → backend_util-RCe-rHaj.js} +59 -60
- package/dist/{backend_webgpu-B0u2ndUn.js → backend_webgpu-DE3ACOLx.js} +45 -47
- package/dist/broadcast_to-B3eYlZm7.js +28 -0
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +7 -11
- package/dist/checks/normRMS.js +9 -9
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +12 -13
- package/dist/checks/rope.js +2 -2
- package/dist/clip_by_value-BnO7-a88.js +12 -0
- package/dist/complex-DjxcVmoX.js +11 -0
- package/dist/concat-BV8bt5H-.js +17 -0
- package/dist/{concat_util-DcJk7YHS.js → concat_util-DpW8mL_l.js} +1 -1
- package/dist/{dataset-0xP8GjwI.js → dataset-BcwmTGYc.js} +137 -139
- package/dist/dropout-BcvN9JYi.js +92 -0
- package/dist/expand_dims-DT4tEPwA.js +11 -0
- package/dist/{exports_initializers-xuidcwI4.js → exports_initializers-Hta_rEnm.js} +1 -1
- package/dist/floor-D5QdR_le.js +9 -0
- package/dist/gather-D3JcZUaI.js +9 -0
- package/dist/{gelu-CNLFZWea.js → gelu-CjNPL4OH.js} +10 -11
- package/dist/{gpgpu_math-DDVJCn6-.js → gpgpu_math-DAOmgtXR.js} +841 -1015
- package/dist/{index-CjOj7j-u.js → index-BwexR4lA.js} +262 -263
- package/dist/index-DOvlwCh-.js +3520 -0
- package/dist/{kernel_funcs_utils-Dg_-E44D.js → kernel_funcs_utils-CCzYdUZg.js} +129 -131
- package/dist/layers/BaseLayer.js +14 -15
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +7 -7
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +9 -9
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +13 -14
- package/dist/log_sum_exp-ngO0-4pK.js +39 -0
- package/dist/main.js +49 -50
- package/dist/{matMul16--R5hOwDG.js → matMul16-BWRSOCWB.js} +14 -15
- package/dist/matMulGelu-CzfgT6Wq.js +163 -0
- package/dist/mat_mul-SjpJRLyL.js +11 -0
- package/dist/mod-AnXEvvpo.js +11 -0
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +13 -14
- package/dist/ones-D2rT0xk2.js +14 -0
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +13 -14
- package/dist/ops/cpu/adamMoments.js +6 -7
- package/dist/ops/cpu/appendCache.js +7 -8
- package/dist/ops/cpu/attentionMask.js +7 -7
- package/dist/ops/cpu/fusedSoftmax.js +10 -11
- package/dist/ops/cpu/gatherSub.js +9 -10
- package/dist/ops/cpu/gelu.js +9 -10
- package/dist/ops/cpu/matMul16.js +6 -7
- package/dist/ops/cpu/matMulGelu.js +5 -6
- package/dist/ops/cpu/matMulMul.js +3 -4
- package/dist/ops/cpu/mulDropout.js +3 -4
- package/dist/ops/cpu/normRMS.js +10 -11
- package/dist/ops/cpu/qkv.js +8 -9
- package/dist/ops/cpu/rope.js +5 -6
- package/dist/ops/cpu/scatterSub.js +17 -19
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +11 -12
- package/dist/ops/grads/attentionMask.js +5 -6
- package/dist/ops/grads/gelu.js +3 -4
- package/dist/ops/grads/matMul16.js +4 -5
- package/dist/ops/grads/matMulGelu.js +9 -10
- package/dist/ops/grads/normRMS.js +7 -8
- package/dist/ops/grads/pack16.js +4 -5
- package/dist/ops/grads/qkv.js +17 -19
- package/dist/ops/grads/rope.js +3 -5
- package/dist/ops/grads/softmax16.js +3 -4
- package/dist/ops/grads/unpack16.js +3 -4
- package/dist/ops/grads/utils.d.ts +1 -0
- package/dist/ops/grads/utils.js +8 -4
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +3 -4
- package/dist/ops/qkv.js +4 -8
- package/dist/ops/reshape16.js +14 -16
- package/dist/ops/rope.d.ts +1 -1
- package/dist/ops/rope.js +3 -8
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +5 -8
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +23 -24
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -3
- package/dist/ops/webgl/adamMoments.js +1 -2
- package/dist/ops/webgl/appendCache.js +1 -2
- package/dist/ops/webgl/attentionMask.js +4 -5
- package/dist/ops/webgl/fusedSoftmax.js +4 -6
- package/dist/ops/webgl/gatherSub.js +6 -7
- package/dist/ops/webgl/gelu.js +2 -3
- package/dist/ops/webgl/log.js +11 -12
- package/dist/ops/webgl/matMul16.js +10 -11
- package/dist/ops/webgl/matMulGelu.js +7 -111
- package/dist/ops/webgl/matMulMul.js +9 -10
- package/dist/ops/webgl/mulDropout.js +8 -9
- package/dist/ops/webgl/normRMS.js +2 -3
- package/dist/ops/webgl/qkv.js +5 -6
- package/dist/ops/webgl/rope.js +7 -8
- package/dist/ops/webgl/scatterSub.js +5 -6
- package/dist/ops/webgpu/adamAdjust.js +10 -12
- package/dist/ops/webgpu/adamMoments.js +8 -10
- package/dist/ops/webgpu/add16.js +8 -9
- package/dist/ops/webgpu/appendCache.js +23 -25
- package/dist/ops/webgpu/attentionMask.js +8 -10
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +12 -14
- package/dist/ops/webgpu/gatherSub.js +11 -13
- package/dist/ops/webgpu/gelu.js +28 -29
- package/dist/ops/webgpu/matMul16.js +26 -28
- package/dist/ops/webgpu/matMul16_program.js +4 -5
- package/dist/ops/webgpu/mul16.js +9 -10
- package/dist/ops/webgpu/normRMS.js +15 -17
- package/dist/ops/webgpu/normRMSGrad.js +21 -28
- package/dist/ops/webgpu/pack16.js +12 -13
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +16 -18
- package/dist/ops/webgpu/rope.js +25 -27
- package/dist/ops/webgpu/scatterSub.js +7 -9
- package/dist/ops/webgpu/slice16.js +21 -23
- package/dist/ops/webgpu/softmax16.js +17 -19
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +7 -8
- package/dist/ops/webgpu/sub16.js +7 -8
- package/dist/ops/webgpu/sum16.js +18 -20
- package/dist/ops/webgpu/transpose16.js +19 -20
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +11 -12
- package/dist/ops/webgpu/unpack16.js +3 -4
- package/dist/ops/webgpu/utils/binary_op.js +7 -8
- package/dist/ops/webgpu/utils/reductions.js +14 -22
- package/dist/ops-B5yanEdW.js +476 -0
- package/dist/pack16-nQ6JaLo-.js +39 -0
- package/dist/patches/webgpu_backend.js +19 -20
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +21 -22
- package/dist/{random_width-DY6Kk2Dl.js → random_width-or-CEftb.js} +2506 -2761
- package/dist/range-BklejeeW.js +10 -0
- package/dist/relu-CP0ZcxWO.js +9 -0
- package/dist/reshape-ByE68wS9.js +9 -0
- package/dist/resize_nearest_neighbor-B19mCEg2.js +175 -0
- package/dist/rope-Ir4mTyD1.js +24 -0
- package/dist/{scatter_nd_util-5EL-8VAQ.js → scatter_nd_util-lvSiX8q4.js} +1 -1
- package/dist/selu_util-kbhpTdYD.js +44 -0
- package/dist/{shared-BRksrJb3.js → shared-DT1TkE6w.js} +1 -1
- package/dist/{shared-BuAXb4CI.js → shared-dntlHIDQ.js} +343 -345
- package/dist/slice-BfEGSH82.js +12 -0
- package/dist/{slice_util-DtEldBfK.js → slice_util-uTKwiEpW.js} +1 -1
- package/dist/{softmax-ZHVebtR1.js → softmax-CA5jFsLR.js} +4 -5
- package/dist/split-CVLc0w--.js +9 -0
- package/dist/squeeze-C7Z2srUo.js +10 -0
- package/dist/stack-Cf4n9h0N.js +11 -0
- package/dist/step-CINUs5QB.js +261 -0
- package/dist/sum-DWAtNGez.js +11 -0
- package/dist/tensor-DJoc7gJU.js +8 -0
- package/dist/tensor1d-D11P_7Dp.js +11 -0
- package/dist/{tensor2d-G4Ys2GxX.js → tensor2d-Bs9wZRc7.js} +6 -7
- package/dist/{tensor4d-B8roDgtc.js → tensor4d-BARPdTaS.js} +6 -7
- package/dist/{tfjs_backend-kNyO5L2d.js → tfjs_backend-y1cvNhLA.js} +244 -253
- package/dist/tile-mbfagpsB.js +11 -0
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.js +2 -2
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.js +2 -2
- package/dist/training/sparseCrossEntropy.js +5 -5
- package/dist/transpose-ClWiBS_b.js +36 -0
- package/dist/unsorted_segment_sum-BDDhB_E6.js +277 -0
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.d.ts +1 -4
- package/dist/utilities/packed.js +10 -711
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.js +5 -5
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-Bhn5bHYv.js → variable-WawDEaAb.js} +1 -1
- package/dist/{webgpu_program-Cigz-7RF.js → webgpu_program-DuOXPQol.js} +178 -172
- package/dist/{webgpu_util-BBCnKm2X.js → webgpu_util-RxEF33Rj.js} +34 -35
- package/dist/zeros-KnWaWf-X.js +13 -0
- package/dist/zeros_like-DvE73F4e.js +721 -0
- package/package.json +4 -2
- package/dist/Reshape-CF6odzV4.js +0 -16
- package/dist/broadcast_to-CwF7XIeu.js +0 -30
- package/dist/complex-CSlYz-2T.js +0 -13
- package/dist/concat-BHlIJeyT.js +0 -19
- package/dist/dropout-C1pM3f11.js +0 -99
- package/dist/expand_dims-BPG4fwBP.js +0 -13
- package/dist/gather-DykLGqmW.js +0 -10
- package/dist/index-ZyQhjEPo.js +0 -2157
- package/dist/log_sum_exp-DWI-76TI.js +0 -41
- package/dist/mat_mul-DeAh4uTH.js +0 -12
- package/dist/mod-Gt1rMB4n.js +0 -12
- package/dist/mulmat_packed_gpu-BMFhLwta.js +0 -55
- package/dist/ones-CAMiP4I2.js +0 -15
- package/dist/ops-CNI3TwqM.js +0 -645
- package/dist/pack16-CFUqumar.js +0 -41
- package/dist/patches/PackedTensor.d.ts +0 -12
- package/dist/patches/PackedTensor.js +0 -11
- package/dist/patches/engine.d.ts +0 -261
- package/dist/patches/engine.js +0 -10
- package/dist/patches/tape.d.ts +0 -12
- package/dist/patches/tape.js +0 -5
- package/dist/range-BMS52eQi.js +0 -11
- package/dist/reciprocal-CTmshQ9J.js +0 -10
- package/dist/register_all_kernels-Bwu1PTuU.js +0 -12307
- package/dist/relu-yZ2-7WxU.js +0 -10
- package/dist/reshape-DevtBWtf.js +0 -10
- package/dist/rope-B5UUMsPi.js +0 -32
- package/dist/selu_util-D1w6yyTO.js +0 -303
- package/dist/sin-BGfy2HZo.js +0 -16
- package/dist/slice-D_gkkqZK.js +0 -13
- package/dist/split-DrfihRpZ.js +0 -10
- package/dist/squeeze-DZEpeblb.js +0 -11
- package/dist/stack-yOIAalTq.js +0 -13
- package/dist/sum-_fzj5ZTB.js +0 -12
- package/dist/tensor-DdQUJZlz.js +0 -909
- package/dist/tensor-f35l8Odg.js +0 -8
- package/dist/tensor1d-CeZuc-Rv.js +0 -12
- package/dist/tensor_util-DV-FP5Q3.js +0 -523
- package/dist/tile-BzyEiF-F.js +0 -13
- package/dist/transpose-DKELTqhe.js +0 -38
- package/dist/zeros-2gldETuK.js +0 -14
package/dist/tile-mbfagpsB.js
ADDED
@@ -0,0 +1,11 @@
+import { A as e, B as a, l as i, E as c, T as l } from "./index-DOvlwCh-.js";
+function u(r, t) {
+  const n = a(r, "x", "tile", "string_or_numeric");
+  i(n.rank === t.length, () => `Error in transpose: rank of input ${n.rank} must match length of reps ${t}.`);
+  const s = { x: n }, o = { reps: t };
+  return c.runKernel(l, s, o);
+}
+const p = /* @__PURE__ */ e({ tile_: u });
+export {
+  p as t
+};
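For readability, here is a rough de-minified reading of the new tile wrapper above. The minified bindings `e`, `a`, `i`, `c`, `l` appear to alias tfjs-core's `op`, `convertToTensor`, `assert`, `ENGINE`, and the `Tile` kernel name; the sketch below restates the same logic with public tfjs-core entry points (the names are my assumption, not the package's). The "Error in transpose" assertion message is a quirk apparently carried over from upstream tfjs-core's tile op.

```ts
import * as tf from "@tensorflow/tfjs-core";

// Sketch of the bundled tile op, restated with the public API.
function tile(x: tf.Tensor, reps: number[]): tf.Tensor {
  tf.util.assert(
    x.rank === reps.length,
    () => `Error in transpose: rank of input ${x.rank} must match length of reps ${reps}.`
  );
  // Dispatch straight to the registered Tile kernel, as the bundle does.
  return tf.engine().runKernel("Tile", { x }, { reps }) as tf.Tensor;
}
```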
package/dist/training/Adam.js
CHANGED
@@ -1,7 +1,7 @@
 import { adamAdjust as b } from "../ops/adamAdjust.js";
 import { adamMoments as d } from "../ops/adamMoments.js";
-import { O as g, e as h, t as o, d as B } from "../index-
-import { z as M } from "../zeros-
+import { O as g, e as h, t as o, d as B } from "../index-DOvlwCh-.js";
+import { z as M } from "../zeros-KnWaWf-X.js";
 class R extends g {
   constructor(t, a, e, s, i = null) {
     super(), this.learningRate = t, this.beta1 = a, this.beta2 = e, this.lossScaling = s, this.epsilon = i, this.accBeta1 = a, this.accBeta2 = e, i === null && (this.epsilon = h().backend.epsilon());
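Only the chunk-hash imports changed here, but for orientation: `R` is the package's Adam optimizer with loss scaling, and the `adamMoments`/`adamAdjust` ops it imports are fused kernels for the Adam update. The sketch below restates the standard Adam step those names suggest, with gradient unscaling and bias correction via the accumulated `accBeta1`/`accBeta2` powers visible in the constructor; this is an inference from the surrounding code, not the package's actual kernels, and all names are mine.

```ts
import * as tf from "@tensorflow/tfjs-core";

// One textbook Adam step for a single parameter tensor, with the gradient
// unscaling implied by `lossScaling` (a sketch, not the fused-kernel code).
function adamStep(
  w: tf.Tensor, grad: tf.Tensor, m: tf.Tensor, v: tf.Tensor,
  lr: number, beta1: number, beta2: number, epsilon: number,
  accBeta1: number, accBeta2: number, lossScaling: number,
) {
  return tf.tidy(() => {
    const g = tf.div(grad, lossScaling);                 // undo loss scaling
    const mNew = tf.add(tf.mul(m, beta1), tf.mul(g, 1 - beta1));
    const vNew = tf.add(tf.mul(v, beta2), tf.mul(tf.square(g), 1 - beta2));
    const mHat = tf.div(mNew, 1 - accBeta1);             // bias correction
    const vHat = tf.div(vNew, 1 - accBeta2);
    const wNew = tf.sub(w, tf.mul(tf.div(mHat, tf.add(tf.sqrt(vHat), epsilon)), lr));
    return { wNew, mNew, vNew };
  });
}
```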
package/dist/training/AdamExt.js
CHANGED
@@ -1,5 +1,5 @@
-import { t as g } from "../index-
-import { d as u, i as d } from "../dataset-
+import { t as g } from "../index-DOvlwCh-.js";
+import { d as u, i as d } from "../dataset-BcwmTGYc.js";
 import "../index-Cp39cXWe.js";
 function z(r) {
   return u(async () => {
package/dist/training/FullTrainer.js
CHANGED
@@ -1,6 +1,6 @@
 import b from "./Trainer.js";
 import L from "./Evaluator.js";
-import { d as w } from "../index-
+import { d as w } from "../index-DOvlwCh-.js";
 import y from "../utilities/profile.js";
 import { createTensorStatistics as D } from "../checks/weights.js";
 const T = {
package/dist/training/Trainer.js
CHANGED
@@ -1,7 +1,7 @@
 import { DatasetBuilder as f, flattenTokens as h, PAGE_FACTOR as y } from "./DatasetBuilder.js";
 import z from "./AdamExt.js";
-import { t as S, v as k, k as x, d as p, b as m } from "../index-
-import { z as g } from "../zeros-
+import { t as S, v as k, k as x, d as p, b as m } from "../index-DOvlwCh-.js";
+import { z as g } from "../zeros-KnWaWf-X.js";
 class M {
   constructor(t, e, s = 1e-3) {
     this.tokenizer = e, this.model = t, this.lossScaling = t.lossScaling, this.learningRate = s, this.resetOptimizer(), this.datasetBuilder = new f(e, t.config.blockSize);
package/dist/training/sparseCrossEntropy.js
CHANGED
@@ -1,15 +1,15 @@
 import { gatherSub as x } from "../ops/gatherSub.js";
 import { scatterSub as L } from "../ops/scatterSub.js";
-import {
-import { s as y } from "../softmax-
-import { m as z, l as v } from "../log_sum_exp-
+import { a6 as C, t as u, a7 as E, c as G } from "../index-DOvlwCh-.js";
+import { s as y } from "../softmax-CA5jFsLR.js";
+import { m as z, l as v } from "../log_sum_exp-ngO0-4pK.js";
 function k(t, s) {
   return u(() => {
     const n = t.shape[t.shape.length - 1], c = t.shape.slice(0, -1).reduce((o, e) => o * e, 1), h = t.shape.length > 2 ? t.reshape([c, n]) : t, p = s.shape.length > 1 ? s.reshape([c]).cast("int32") : s.cast("int32"), r = z(h, -1, !0), a = G(h, r), d = v(a, -1);
     return x(d, p, a);
   });
 }
-function 
+function w() {
   return C(
     // @ts-expect-error Invalid params
     (s, n, m) => {
@@ -22,6 +22,6 @@ function q() {
   );
 }
 export {
-
+  w as createSoftmaxCrossEntropyWithGrad,
   k as sparseSoftmaxCrossEntropy
 };
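The rewritten `k` (exported as `sparseSoftmaxCrossEntropy`) flattens the logits, shifts by the row max (`z` appears to alias `max`), and combines `logSumExp` with the package's fused `gatherSub` op. Below is a plain tfjs-core sketch of the same computation, with `gatherSub` inlined via a one-hot mask; that inlining is my reading of its gather-the-label-logit-and-subtract semantics, not the package's implementation.

```ts
import * as tf from "@tensorflow/tfjs-core";

// loss[i] = logSumExp(logits[i]) - logits[i][label[i]], computed stably.
function sparseSoftmaxCrossEntropy(logits: tf.Tensor, labels: tf.Tensor): tf.Tensor {
  return tf.tidy(() => {
    const numClasses = logits.shape[logits.shape.length - 1];
    const batch = logits.shape.slice(0, -1).reduce((a, b) => a * b, 1);
    const logits2d = logits.rank > 2 ? logits.reshape([batch, numClasses]) : logits;
    const labels1d = (labels.rank > 1 ? labels.reshape([batch]) : labels).cast("int32");
    const shifted = tf.sub(logits2d, tf.max(logits2d, -1, true)); // subtract row max
    const lse = tf.logSumExp(shifted, -1);
    // gatherSub(lse, labels, shifted) in the diff: pick each row's label logit
    // and subtract it from logSumExp. Inlined here with a one-hot mask.
    const labelLogit = tf.sum(tf.mul(shifted, tf.oneHot(labels1d, numClasses)), -1);
    return tf.sub(lse, labelLogit);
  });
}
```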
package/dist/transpose-ClWiBS_b.js
ADDED
@@ -0,0 +1,36 @@
+import { A as u, B as i, E as o, ap as $, aq as g, ar as m, l, t as x, as as p } from "./index-DOvlwCh-.js";
+import { c as k } from "./complex-DjxcVmoX.js";
+function K(r) {
+  const e = { input: i(r, "input", "imag") };
+  return o.runKernel($, e);
+}
+const h = /* @__PURE__ */ u({ imag_: K });
+function E(r) {
+  const e = { x: i(r, "x", "neg") };
+  return o.runKernel(g, e);
+}
+const _ = /* @__PURE__ */ u({ neg_: E });
+function b(r) {
+  const e = { input: i(r, "input", "real") };
+  return o.runKernel(m, e);
+}
+const d = /* @__PURE__ */ u({ real_: b });
+function N(r, t, e) {
+  const n = i(r, "x", "transpose");
+  if (t == null && (t = n.shape.map((s, a) => a).reverse()), l(n.rank === t.length, () => `Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`), t.forEach((s) => {
+    l(s >= 0 && s < n.rank, () => `All entries in 'perm' must be between 0 and ${n.rank - 1} but got ${t}`);
+  }), n.rank <= 1)
+    return n.clone();
+  const f = { x: n }, c = { perm: t };
+  return n.dtype === "complex64" ? x(() => {
+    let s = d(n), a = h(n);
+    return s = o.runKernel(p, { x: s }, c), a = o.runKernel(p, { x: a }, c), e && (a = _(a)), k(s, a);
+  }) : o.runKernel(p, f, c);
+}
+const y = /* @__PURE__ */ u({ transpose_: N });
+export {
+  h as i,
+  _ as n,
+  d as r,
+  y as t
+};
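The interesting part of this new transpose chunk is the `complex64` branch of `N`: it splits the tensor into real and imaginary parts, transposes each with the same `perm`, and negates the imaginary part when the conjugate flag `e` is set. The same behavior, written out with public tfjs-core ops:

```ts
import * as tf from "@tensorflow/tfjs-core";

// transpose on complex64 = transpose(real) + i * (±transpose(imag));
// the bundled code performs exactly this split internally.
const z = tf.complex(tf.tensor2d([[1, 2], [3, 4]]),
                     tf.tensor2d([[5, 6], [7, 8]]));
const zT = tf.transpose(z, [1, 0]);              // plain transpose
// Conjugate transpose, spelled the way the bundle does it:
const zH = tf.complex(tf.transpose(tf.real(z), [1, 0]),
                      tf.neg(tf.transpose(tf.imag(z), [1, 0])));
```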
package/dist/unsorted_segment_sum-BDDhB_E6.js
ADDED
@@ -0,0 +1,277 @@
+import { A as h, B as c, E as d, bo as T, bp as q, bq as H, l, br as P, X as _, bs as y, bt as B, bu as I, bv as W, bw as A, bx as G, by as L, bz as O, bA as z, bB as F, L as M, a3 as j, bC as J, bD as Q, bE as U, a6 as V, c as N, m as X, bF as Y, bG as Z, bH as R, bI as nn, bJ as tn, bK as sn, bL as en, bM as rn, bN as on, bO as an, bP as un, aG as cn, bQ as ln } from "./index-DOvlwCh-.js";
+import { k as C, c as g, m as D } from "./step-CINUs5QB.js";
+import { r as b } from "./reshape-ByE68wS9.js";
+import { m as pn, a as hn, e as w } from "./log_sum_exp-ngO0-4pK.js";
+import { s as K } from "./sum-DWAtNGez.js";
+function fn(s, n = null, t = !1) {
+  const i = { x: c(s, "x", "all", "bool") }, o = { axis: n, keepDims: t };
+  return d.runKernel(T, i, o);
+}
+const nt = /* @__PURE__ */ h({ all_: fn });
+function dn(s, n = null, t = !1) {
+  const i = { x: c(s, "x", "any", "bool") }, o = { axis: n, keepDims: t };
+  return d.runKernel(q, i, o);
+}
+const tt = /* @__PURE__ */ h({ any_: dn });
+function mn(s, n = 0) {
+  const e = { x: c(s, "x", "argMax") }, i = { axis: n };
+  return d.runKernel(H, e, i);
+}
+const st = /* @__PURE__ */ h({ argMax_: mn });
+function $n(s, n, t, e, i) {
+  const o = c(s, "x", "avgPool", "float32"), p = 1;
+  l(C(t, p), () => `Error in avgPool: Either strides or dilations must be 1. Got strides ${t} and dilations '${p}'`);
+  let r = o, a = !1;
+  o.rank === 3 && (a = !0, r = b(o, [1, o.shape[0], o.shape[1], o.shape[2]])), l(r.rank === 4, () => `Error in avgPool: x must be rank 4 but got rank ${r.rank}.`), g("avgPool", e, i);
+  const u = { x: r }, m = { filterSize: n, strides: t, pad: e, dimRoundingMode: i };
+  let f = d.runKernel(P, u, m);
+  return f = _(f, o.dtype), a ? b(f, [f.shape[1], f.shape[2], f.shape[3]]) : f;
+}
+const et = /* @__PURE__ */ h({ avgPool_: $n });
+function bn(s) {
+  const t = { x: c(s, "x", "tanh", "float32") };
+  return d.runKernel(y, t);
+}
+const rt = /* @__PURE__ */ h({ tanh_: bn });
+function xn(s, n, t) {
+  const e = c(s, "x", "batchToSpaceND"), i = n.reduce((r, a) => r * a);
+  l(e.rank >= 1 + n.length, () => `input rank is ${e.rank} but should be > than blockShape.length ${n.length}`), l(t.length === n.length, () => `crops.length is ${t.length} but should be equal to blockShape.length ${n.length}`), l(e.shape[0] % i === 0, () => `input tensor batch is ${e.shape[0]} but is not divisible by the product of the elements of blockShape ${n.join(" * ")} === ${i}`);
+  const o = { x: e }, p = { blockShape: n, crops: t };
+  return d.runKernel(B, o, p);
+}
+const ot = /* @__PURE__ */ h({ batchToSpaceND_: xn });
+function kn(s) {
+  let n;
+  return s.rank === 0 || s.rank === 1 ? n = b(s, [1, 1, 1, s.size]) : s.rank === 2 ? n = b(s, [1, 1, s.shape[0], s.shape[1]]) : s.rank === 3 ? n = b(s, [1, s.shape[0], s.shape[1], s.shape[2]]) : n = s, n;
+}
+function vn(s, n, t, e, i, o) {
+  o == null && (o = 1e-3);
+  const p = c(s, "x", "batchNorm"), r = c(n, "mean", "batchNorm"), a = c(t, "variance", "batchNorm");
+  let u;
+  i != null && (u = c(i, "scale", "batchNorm"));
+  let m;
+  e != null && (m = c(e, "offset", "batchNorm")), l(r.rank === a.rank, () => "Batch normalization gradient requires mean and variance to have equal ranks."), l(m == null || r.rank === m.rank, () => "Batch normalization gradient requires mean and offset to have equal ranks."), l(u == null || r.rank === u.rank, () => "Batch normalization gradient requires mean and scale to have equal ranks.");
+  const x = {
+    x: kn(p),
+    scale: u,
+    offset: m,
+    mean: r,
+    variance: a
+  }, k = { varianceEpsilon: o }, $ = d.runKernel(I, x, k);
+  return b($, p.shape);
+}
+const at = /* @__PURE__ */ h({ batchNorm_: vn });
+function gn(s, n, t, e, i = "NHWC", o = [1, 1], p) {
+  const r = c(s, "x", "conv2d", "float32"), a = c(n, "filter", "conv2d", "float32");
+  let u = r, m = !1;
+  r.rank === 3 && (m = !0, u = b(r, [1, r.shape[0], r.shape[1], r.shape[2]])), l(u.rank === 4, () => `Error in conv2d: input must be rank 4, but got rank ${u.rank}.`), l(a.rank === 4, () => `Error in conv2d: filter must be rank 4, but got rank ${a.rank}.`), g("conv2d", e, p);
+  const f = i === "NHWC" ? u.shape[3] : u.shape[1];
+  l(f === a.shape[2], () => `Error in conv2d: depth of input (${f}) must match input depth for filter ${a.shape[2]}.`), l(C(t, o), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${t} and dilations '${o}'`), l(D(o), () => "Error in conv2D: Dilated rates should be larger than 0."), l(D(t), () => "Error in conv2D: Strides should be larger than 0.");
+  const x = { x: u, filter: a }, k = { strides: t, pad: e, dataFormat: i, dilations: o, dimRoundingMode: p }, $ = d.runKernel(W, x, k);
+  return m ? b($, [$.shape[1], $.shape[2], $.shape[3]]) : $;
+}
+const S = /* @__PURE__ */ h({ conv2d_: gn });
+function Dn(s, n, t, e, i = "NWC", o = 1, p) {
+  const r = c(s, "x", "conv1d"), a = c(n, "filter", "conv1d");
+  let u = r, m = !1;
+  r.rank === 2 && (m = !0, u = b(r, [1, r.shape[0], r.shape[1]])), l(u.rank === 3, () => `Error in conv1d: input must be rank 3, but got rank ${u.rank}.`), l(a.rank === 3, () => `Error in conv1d: filter must be rank 3, but got rank ${a.rank}.`), g("conv1d", e, p), l(u.shape[2] === a.shape[1], () => `Error in conv1d: depth of input (${u.shape[2]}) must match input depth for filter ${a.shape[1]}.`), l(C(t, o), () => `Error in conv1D: Either stride or dilation must be 1. Got stride ${t} and dilation '${o}'`), l(D(o), () => "Error in conv1D: Dilated rates should be larger than 0."), l(D(t), () => "Error in conv1D: Stride should be larger than 0."), l(i === "NWC", () => `Error in conv1d: got dataFormat of ${i} but only NWC is currently supported.`);
+  const f = b(a, [1, a.shape[0], a.shape[1], a.shape[2]]), x = b(u, [u.shape[0], 1, u.shape[1], u.shape[2]]), v = S(x, f, [1, t], e, "NHWC", [1, o], p);
+  return m ? b(v, [v.shape[2], v.shape[3]]) : b(v, [v.shape[0], v.shape[2], v.shape[3]]);
+}
+const it = /* @__PURE__ */ h({ conv1d_: Dn });
+function Cn(s, n, t, e, i, o = "NHWC", p) {
+  l(s.length === n.rank, () => `Length of inShape (${s.length}) and rank of dy (${n.rank}) must match`);
+  let r = s, a = n, u = !1;
+  n.rank === 3 && (u = !0, a = b(n, [1, n.shape[0], n.shape[1], n.shape[2]]), r = [1, s[0], s[1], s[2]]), l(r.length === 4, () => `Error in conv2dDerInput: inShape must be length 4, but got length ${r.length}.`), l(a.rank === 4, () => `Error in conv2dDerInput: dy must be rank 4, but got rank ${a.rank}`), l(t.rank === 4, () => `Error in conv2dDerInput: filter must be rank 4, but got rank ${t.rank}`);
+  const m = o === "NHWC" ? r[3] : r[1], f = o === "NHWC" ? a.shape[3] : a.shape[1];
+  l(m === t.shape[2], () => `Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${t.shape[2]}.`), l(f === t.shape[3], () => `Error in conv2dDerInput: depth of output (${f}) must match output depth for filter ${t.shape[3]}.`), g("conv2dDerInput", i, p);
+  const x = { dy: a, filter: t }, k = { strides: e, pad: i, dataFormat: o, dimRoundingMode: p, inputShape: r }, $ = d.runKernel(A, x, k);
+  return u ? b($, [$.shape[1], $.shape[2], $.shape[3]]) : $;
+}
+const En = /* @__PURE__ */ h({ conv2DBackpropInput_: Cn });
+function Nn(s, n, t, e, i, o) {
+  const p = c(s, "x", "conv2dTranspose"), r = c(n, "filter", "conv2dTranspose");
+  return En(t, p, r, e, i, "NHWC", o);
+}
+const ut = /* @__PURE__ */ h({ conv2dTranspose_: Nn });
+function _n(s) {
+  const t = { x: c(s, "x", "cos", "float32") };
+  return d.runKernel(G, t);
+}
+const ct = /* @__PURE__ */ h({ cos_: _n });
+function wn(s) {
+  const t = { x: c(s, "x", "cosh", "float32") };
+  return d.runKernel(L, t);
+}
+const lt = /* @__PURE__ */ h({ cosh_: wn });
+function Kn(s, n = 0, t = !1, e = !1) {
+  const o = { x: c(s, "x", "cumprod") }, p = { axis: n, exclusive: t, reverse: e };
+  return d.runKernel(O, o, p);
+}
+const pt = /* @__PURE__ */ h({ cumprod_: Kn });
+function Sn(s, n = 0, t = !1, e = !1) {
+  const o = { x: c(s, "x", "cumsum") }, p = { axis: n, exclusive: t, reverse: e };
+  return d.runKernel(z, o, p);
+}
+const ht = /* @__PURE__ */ h({ cumsum_: Sn });
+function Tn(s, n, t, e, i = "NHWC", o = [1, 1], p) {
+  const r = c(s, "x", "depthwiseConv2d", "float32"), a = c(n, "filter", "depthwiseConv2d", "float32");
+  let u = r, m = !1;
+  r.rank === 3 && (m = !0, u = b(r, [1, r.shape[0], r.shape[1], r.shape[2]])), l(u.rank === 4, () => `Error in depthwiseConv2d: input must be rank 4, but got rank ${u.rank}.`), l(a.rank === 4, () => `Error in depthwiseConv2d: filter must be rank 4, but got rank ${a.rank}.`);
+  const f = i === "NHWC" ? u.shape[3] : u.shape[1];
+  l(f === a.shape[2], () => `Error in depthwiseConv2d: number of input channels (${f}) must match the inChannels dimension in filter ${a.shape[2]}.`), g("depthwiseConv2d", e, p);
+  const x = { x: u, filter: a }, k = { strides: t, pad: e, dataFormat: i, dilations: o, dimRoundingMode: p }, $ = d.runKernel(F, x, k);
+  return m ? b($, [$.shape[1], $.shape[2], $.shape[3]]) : $;
+}
+const qn = /* @__PURE__ */ h({ depthwiseConv2d_: Tn });
+function Hn(s, n) {
+  let t = c(s, "a", "equal", "string_or_numeric"), e = c(n, "b", "equal", "string_or_numeric");
+  [t, e] = M(t, e), j(t.shape, e.shape);
+  const i = { a: t, b: e };
+  return d.runKernel(J, i);
+}
+const ft = /* @__PURE__ */ h({ equal_: Hn });
+function Pn(s) {
+  let n = c(s, "x", "erf");
+  l(n.dtype === "int32" || n.dtype === "float32", () => "Input dtype must be `int32` or `float32`."), n.dtype === "int32" && (n = _(n, "float32"));
+  const t = { x: n };
+  return d.runKernel(Q, t);
+}
+const dt = /* @__PURE__ */ h({ erf_: Pn });
+function yn(s) {
+  const t = { x: c(s, "x", "softplus") };
+  return d.runKernel(U, t);
+}
+const mt = /* @__PURE__ */ h({ softplus_: yn });
+function Bn(s, n = -1) {
+  const t = c(s, "logits", "logSoftmax");
+  if (n === -1 && (n = t.rank - 1), n !== t.rank - 1)
+    throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${t.rank} and axis was ${n}`);
+  return V((i, o) => {
+    const r = pn(i, n, !0), a = N(i, r), u = N(_(a, "float32"), hn(K(w(a), n, !0)));
+    return o([u]), { value: u, gradFunc: (f, x) => {
+      const [k] = x, $ = !0, E = w(k);
+      return N(f, X(K(f, n, $), E));
+    } };
+  })(t);
+}
+const $t = /* @__PURE__ */ h({ logSoftmax_: Bn });
+function In(s) {
+  const t = { x: c(s, "x", "logicalNot", "bool") };
+  return d.runKernel(Y, t);
+}
+const bt = /* @__PURE__ */ h({ logicalNot_: In });
+function Wn(s, n, t, e, i) {
+  const o = c(s, "x", "maxPool"), p = 1;
+  let r = o, a = !1;
+  o.rank === 3 && (a = !0, r = b(o, [1, o.shape[0], o.shape[1], o.shape[2]])), l(r.rank === 4, () => `Error in maxPool: input must be rank 4 but got rank ${r.rank}.`), l(C(t, p), () => `Error in maxPool: Either strides or dilations must be 1. Got strides ${t} and dilations '${p}'`), g("maxPool", e, i);
+  const u = { x: r }, m = { filterSize: n, strides: t, pad: e, dimRoundingMode: i }, f = d.runKernel(Z, u, m);
+  return a ? b(f, [f.shape[1], f.shape[2], f.shape[3]]) : f;
+}
+const xt = /* @__PURE__ */ h({ maxPool_: Wn });
+function An(s, n, t = 1, e = 0, i = "int32") {
+  if (n < 2)
+    throw new Error(`Error in oneHot: depth must be >=2, but it is ${n}`);
+  const p = { indices: c(s, "indices", "oneHot", "int32") }, r = { dtype: i, depth: n, onValue: t, offValue: e };
+  return d.runKernel(R, p, r);
+}
+const kt = /* @__PURE__ */ h({ oneHot_: An });
+function Gn(s) {
+  const t = { x: c(s, "x", "onesLike") };
+  return d.runKernel(nn, t);
+}
+const vt = /* @__PURE__ */ h({ onesLike_: Gn });
+function Ln(s, n, t = 0) {
+  const e = c(s, "x", "pad");
+  if (e.rank === 0)
+    throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");
+  const i = { paddings: n, constantValue: t }, o = { x: e };
+  return d.runKernel(tn, o, i);
+}
+const gt = /* @__PURE__ */ h({ pad_: Ln });
+function On(s, n, t) {
+  const e = c(s, "x", "spaceToBatchND");
+  l(e.rank >= 1 + n.length, () => `input rank ${e.rank} should be > than [blockShape] ${n.length}`), l(t.length === n.length, () => `paddings.shape[0] ${t.length} must be equal to [blockShape] ${n.length}`), l(e.shape.reduce((p, r, a) => a > 0 && a <= n.length ? p && (r + t[a - 1][0] + t[a - 1][1]) % n[a - 1] === 0 : p, !0), () => `input spatial dimensions ${e.shape.slice(1)} with paddings ${t.toString()} must be divisible by blockShapes ${n.toString()}`);
+  const i = { x: e }, o = { blockShape: n, paddings: t };
+  return d.runKernel(sn, i, o);
+}
+const Dt = /* @__PURE__ */ h({ spaceToBatchND_: On });
+function zn(s, n) {
+  const e = { x: c(s, "x", "reverse") }, i = { dims: n };
+  return d.runKernel(en, e, i);
+}
+const Ct = /* @__PURE__ */ h({ reverse_: zn });
+function Fn(s) {
+  const t = { x: c(s, "x", "rsqrt", "float32") };
+  return d.runKernel(rn, t);
+}
+const Et = /* @__PURE__ */ h({ rsqrt_: Fn });
+function Mn(s) {
+  const t = { x: c(s, "x", "selu") };
+  return d.runKernel(on, t);
+}
+const Nt = /* @__PURE__ */ h({ selu_: Mn });
+function jn(s, n, t, e, i, o = [1, 1], p = "NHWC") {
+  const r = c(s, "x", "separableConv2d"), a = c(n, "depthwiseFilter", "separableConv2d"), u = c(t, "pointwiseFilter", "separableConv2d");
+  let m = r, f = !1;
+  if (r.rank === 3 && (f = !0, m = b(r, [1, r.shape[0], r.shape[1], r.shape[2]])), p === "NCHW")
+    throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");
+  l(m.rank === 4, () => `Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`), l(a.rank === 4, () => `Error in separableConv2d: depthwise filter must be rank 4, but got rank ${a.rank}.`), l(u.rank === 4, () => `Error in separableConv2d: pointwise filter must be rank 4, but got rank ${a.rank}.`), l(u.shape[0] === 1, () => `Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${u.shape[0]}.`), l(u.shape[1] === 1, () => `Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${u.shape[1]}.`);
+  const x = a.shape[2], k = a.shape[3];
+  l(u.shape[2] === x * k, () => `Error in separableConv2d: the third dimension of pointwise filter must be ${x * k}, but got ${u.shape[2]}.`);
+  const $ = qn(m, a, e, i, p, o), v = S($, u, 1, "valid", p);
+  return f ? b(v, [v.shape[1], v.shape[2], v.shape[3]]) : v;
+}
+const _t = /* @__PURE__ */ h({ separableConv2d_: jn });
+function Jn(s) {
+  const t = { x: c(s, "x", "sin", "float32") };
+  return d.runKernel(an, t);
+}
+const wt = /* @__PURE__ */ h({ sin_: Jn });
+function Qn(s) {
+  const t = { x: c(s, "x", "sinh") };
+  return d.runKernel(un, t);
+}
+const Kt = /* @__PURE__ */ h({ sinh_: Qn });
+function Un(s, n, t) {
+  const e = c(s, "x", "unsortedSegmentSum"), i = c(n, "segmentIds", "unsortedSegmentSum", "int32");
+  l(cn(t), () => "numSegments must be of dtype int");
+  const o = { x: e, segmentIds: i }, p = { numSegments: t };
+  return d.runKernel(ln, o, p);
+}
+const St = /* @__PURE__ */ h({ unsortedSegmentSum_: Un });
+export {
+  Et as A,
+  Nt as B,
+  _t as C,
+  Kt as D,
+  rt as E,
+  St as F,
+  En as G,
+  mt as a,
+  Dt as b,
+  ct as c,
+  et as d,
+  ft as e,
+  ot as f,
+  nt as g,
+  tt as h,
+  st as i,
+  at as j,
+  it as k,
+  bt as l,
+  xt as m,
+  ut as n,
+  S as o,
+  lt as p,
+  pt as q,
+  Ct as r,
+  wt as s,
+  ht as t,
+  qn as u,
+  dt as v,
+  $t as w,
+  kt as x,
+  vt as y,
+  gt as z
+};
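This chunk is mostly mechanical kernel wrappers, but `Bn` (logSoftmax, exported as `$t as w`) is worth a note: it implements the numerically stable formulation inside a `customGrad`. The identity it relies on, checked against the public op:

```ts
import * as tf from "@tensorflow/tfjs-core";

// logSoftmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))) along the last axis.
const logits = tf.tensor2d([[1, 2, 3], [0, 0, 10]]);
const shifted = tf.sub(logits, tf.max(logits, -1, true));
const manual = tf.sub(shifted, tf.log(tf.sum(tf.exp(shifted), -1, true)));
manual.print();                  // same values as the library op:
tf.logSoftmax(logits).print();
```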
package/dist/utilities/dummy.js
CHANGED
@@ -1,6 +1,6 @@
-import { a as y, e as S, v as w } from "../index-
-import { z as m } from "../zeros-
-import { o as P } from "../ones-
+import { a as y, e as S, v as w } from "../index-DOvlwCh-.js";
+import { z as m } from "../zeros-KnWaWf-X.js";
+import { o as P } from "../ones-D2rT0xk2.js";
 async function b(s) {
   const t = m([1, s.config.blockSize], "int32"), [n, o] = s.forward({ training: !1 }, t);
   await n.data(), n.dispose(), o && o.dispose(), t.dispose();
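`b` here is a warm-up helper: it runs one all-zeros batch through the model and awaits the result so that backend kernels (e.g. WebGPU pipelines) are compiled before real training or generation. A de-minified reading; the `ForwardModel` interface is my guess from the call site, not the package's declared type.

```ts
import * as tf from "@tensorflow/tfjs-core";

// Hypothetical shape of the model argument, inferred from the call site above.
interface ForwardModel {
  config: { blockSize: number };
  forward(opts: { training: boolean }, x: tf.Tensor): [tf.Tensor, tf.Tensor | null];
}

// Run one dummy forward pass so kernels/shaders are compiled up front.
async function warmUp(model: ForwardModel): Promise<void> {
  const input = tf.zeros([1, model.config.blockSize], "int32");
  const [logits, aux] = model.forward({ training: false }, input);
  await logits.data();       // force execution to completion
  logits.dispose();
  aux?.dispose();
  input.dispose();
}
```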
package/dist/utilities/packed.d.ts
CHANGED
@@ -1,7 +1,4 @@
-import { PackableTensor } from '../patches/PackedTensor';
 import { Tensor } from '@tensorflow/tfjs-core';
 export declare function packingSupported(): boolean;
-export declare function isPackableTensor(tensor: Tensor):
+export declare function isPackableTensor(tensor: Tensor): boolean;
 export declare function isPackedTensor(tensor: Tensor): boolean;
-export declare function packTensor(tensor: Tensor): Tensor;
-export declare function unpackTensor(tensor: Tensor): Tensor;