@genai-fi/nanogpt 0.10.3 → 0.12.0
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/dist/Generator.d.ts +10 -5
- package/dist/Generator.js +1789 -1765
- package/dist/{RealDiv-KAPDe8zB.js → RealDiv-C8neBwFi.js} +15 -15
- package/dist/{Reshape-BYkmUnAv.js → Reshape-Bd4V_4X7.js} +1 -1
- package/dist/{Reshape-Zt6eb7yh.js → Reshape-Ck29jQSY.js} +5 -5
- package/dist/TeachableLLM.d.ts +5 -3
- package/dist/TeachableLLM.js +14 -14
- package/dist/Trainer.d.ts +3 -1
- package/dist/Trainer.js +11 -8
- package/dist/{axis_util-BaG7mf5A.js → axis_util-DGqbT-FX.js} +3 -3
- package/dist/backend.js +2 -2
- package/dist/{backend_util-RCe-rHaj.js → backend_util-DC3rBo_H.js} +18 -18
- package/dist/{backend_webgpu-DE3ACOLx.js → backend_webgpu-mbhNnlx9.js} +3 -3
- package/dist/{broadcast_to-B3eYlZm7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +2 -2
- package/dist/checks/normRMS.js +4 -4
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +4 -4
- package/dist/checks/rope.js +2 -2
- package/dist/{clip_by_value-BnO7-a88.js → clip_by_value-fg2aKzUy.js} +5 -5
- package/dist/complex-Cyg-eQeZ.js +11 -0
- package/dist/concat-CSm2rMwe.js +17 -0
- package/dist/{concat_util-DpW8mL_l.js → concat_util-D0je5Ppu.js} +1 -1
- package/dist/{dataset-BcwmTGYc.js → dataset-CVIJu7Xa.js} +7 -7
- package/dist/{dropout-BcvN9JYi.js → dropout-DLhSMNTZ.js} +9 -9
- package/dist/expand_dims-ChkuOp6I.js +11 -0
- package/dist/{exports_initializers-Hta_rEnm.js → exports_initializers-1KWPiStI.js} +1 -1
- package/dist/{floor-D5QdR_le.js → floor-BRMPgeIs.js} +1 -1
- package/dist/{gather-D3JcZUaI.js → gather-BSULDalH.js} +1 -1
- package/dist/{gelu-CjNPL4OH.js → gelu-BK1k-n1i.js} +1 -1
- package/dist/{gpgpu_math-DAOmgtXR.js → gpgpu_math-BJSTk_mW.js} +25 -25
- package/dist/{index-BwexR4lA.js → index-BBVLAXZD.js} +89 -89
- package/dist/{index-DOvlwCh-.js → index-Duu1Lvvv.js} +53 -53
- package/dist/{kernel_funcs_utils-CCzYdUZg.js → kernel_funcs_utils-BtYrPoJu.js} +6 -6
- package/dist/layers/BaseLayer.js +2 -2
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +5 -5
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +4 -4
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +9 -9
- package/dist/log_sum_exp-CVqLsVLl.js +39 -0
- package/dist/main.d.ts +10 -1
- package/dist/main.js +68 -58
- package/dist/{matMul16-BWRSOCWB.js → matMul16-xswmhSuF.js} +3 -3
- package/dist/{matMulGelu-CzfgT6Wq.js → matMulGelu-BpvgnYG8.js} +14 -14
- package/dist/mat_mul-Bn2BDpT4.js +11 -0
- package/dist/{mod-AnXEvvpo.js → mod-B4AUd1Np.js} +1 -1
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +9 -9
- package/dist/{ones-D2rT0xk2.js → ones-CBI1AQjb.js} +3 -3
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +9 -9
- package/dist/ops/cpu/adamMoments.js +5 -5
- package/dist/ops/cpu/appendCache.js +6 -6
- package/dist/ops/cpu/attentionMask.js +10 -10
- package/dist/ops/cpu/fusedSoftmax.js +5 -5
- package/dist/ops/cpu/gatherSub.js +9 -9
- package/dist/ops/cpu/gelu.js +5 -5
- package/dist/ops/cpu/matMul16.js +2 -2
- package/dist/ops/cpu/matMulGelu.js +3 -3
- package/dist/ops/cpu/matMulMul.js +5 -5
- package/dist/ops/cpu/mulDropout.js +1 -1
- package/dist/ops/cpu/normRMS.js +7 -7
- package/dist/ops/cpu/qkv.js +3 -3
- package/dist/ops/cpu/rope.js +5 -5
- package/dist/ops/cpu/scatterSub.js +11 -11
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +4 -4
- package/dist/ops/grads/attentionMask.js +2 -2
- package/dist/ops/grads/gelu.js +2 -2
- package/dist/ops/grads/matMul16.js +3 -3
- package/dist/ops/grads/matMulGelu.js +6 -6
- package/dist/ops/grads/normRMS.js +4 -4
- package/dist/ops/grads/pack16.js +3 -3
- package/dist/ops/grads/qkv.js +10 -10
- package/dist/ops/grads/rope.js +2 -2
- package/dist/ops/grads/softmax16.js +1 -1
- package/dist/ops/grads/unpack16.js +2 -2
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +2 -2
- package/dist/ops/qkv.js +1 -1
- package/dist/ops/reshape16.js +2 -2
- package/dist/ops/rope.js +2 -2
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +1 -1
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +6 -6
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -2
- package/dist/ops/webgl/adamMoments.js +1 -1
- package/dist/ops/webgl/appendCache.js +1 -1
- package/dist/ops/webgl/attentionMask.js +1 -1
- package/dist/ops/webgl/fusedSoftmax.js +4 -4
- package/dist/ops/webgl/gatherSub.js +1 -1
- package/dist/ops/webgl/gelu.js +2 -2
- package/dist/ops/webgl/log.js +3 -3
- package/dist/ops/webgl/matMul16.js +8 -8
- package/dist/ops/webgl/matMulGelu.js +4 -4
- package/dist/ops/webgl/matMulMul.js +7 -7
- package/dist/ops/webgl/mulDropout.js +1 -1
- package/dist/ops/webgl/normRMS.js +7 -7
- package/dist/ops/webgl/qkv.js +1 -1
- package/dist/ops/webgl/rope.js +1 -1
- package/dist/ops/webgl/scatterSub.js +1 -1
- package/dist/ops/webgpu/adamAdjust.js +3 -3
- package/dist/ops/webgpu/adamMoments.js +5 -5
- package/dist/ops/webgpu/add16.js +1 -1
- package/dist/ops/webgpu/appendCache.js +3 -3
- package/dist/ops/webgpu/attentionMask.js +2 -2
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +5 -5
- package/dist/ops/webgpu/gatherSub.js +5 -5
- package/dist/ops/webgpu/gelu.js +3 -3
- package/dist/ops/webgpu/matMul16.js +19 -19
- package/dist/ops/webgpu/matMul16_program.js +2 -2
- package/dist/ops/webgpu/mul16.js +4 -4
- package/dist/ops/webgpu/normRMS.js +6 -6
- package/dist/ops/webgpu/normRMSGrad.js +4 -4
- package/dist/ops/webgpu/pack16.js +3 -3
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +8 -8
- package/dist/ops/webgpu/rope.js +3 -3
- package/dist/ops/webgpu/scatterSub.js +3 -3
- package/dist/ops/webgpu/slice16.js +4 -4
- package/dist/ops/webgpu/softmax16.js +4 -4
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +1 -1
- package/dist/ops/webgpu/sub16.js +4 -4
- package/dist/ops/webgpu/sum16.js +5 -5
- package/dist/ops/webgpu/transpose16.js +2 -2
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
- package/dist/ops/webgpu/unpack16.js +5 -5
- package/dist/ops/webgpu/utils/binary_op.js +3 -3
- package/dist/ops/webgpu/utils/reductions.js +4 -4
- package/dist/{ops-B5yanEdW.js → ops-C2_OXuZ4.js} +69 -69
- package/dist/{pack16-nQ6JaLo-.js → pack16-atD0eYRm.js} +9 -9
- package/dist/patches/webgpu_backend.js +6 -6
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +8 -8
- package/dist/{random_width-or-CEftb.js → random_width-BN4wGJaW.js} +33 -33
- package/dist/range-DKmP1-OQ.js +10 -0
- package/dist/relu-BsXmGzzu.js +9 -0
- package/dist/{reshape-ByE68wS9.js → reshape-BI0yzp1T.js} +1 -1
- package/dist/{resize_nearest_neighbor-B19mCEg2.js → resize_nearest_neighbor-BA_BX-ub.js} +26 -26
- package/dist/{rope-Ir4mTyD1.js → rope-DJ7Y7c-u.js} +1 -1
- package/dist/{scatter_nd_util-lvSiX8q4.js → scatter_nd_util-k9MUVUkn.js} +1 -1
- package/dist/{selu_util-kbhpTdYD.js → selu_util-DyW0X1WG.js} +5 -5
- package/dist/{shared-DT1TkE6w.js → shared-Q3BS6T03.js} +1 -1
- package/dist/{shared-dntlHIDQ.js → shared-nnSWpC3u.js} +86 -86
- package/dist/{slice-BfEGSH82.js → slice-wBNvzVyz.js} +1 -1
- package/dist/{slice_util-uTKwiEpW.js → slice_util-zN8KFC5I.js} +1 -1
- package/dist/{softmax-CA5jFsLR.js → softmax-DfuYyjMh.js} +1 -1
- package/dist/split-BYrLboMq.js +9 -0
- package/dist/squeeze-Bk8Brcct.js +10 -0
- package/dist/{stack-Cf4n9h0N.js → stack-CDWShFHF.js} +1 -1
- package/dist/{step-CINUs5QB.js → step-BS5JXRR6.js} +23 -23
- package/dist/{sum-DWAtNGez.js → sum-BPUfDB2X.js} +3 -3
- package/dist/tensor-CEt9Nm2s.js +8 -0
- package/dist/tensor1d-Cc_KCIDg.js +11 -0
- package/dist/{tensor2d-Bs9wZRc7.js → tensor2d-BN97fF71.js} +3 -3
- package/dist/{tensor4d-BARPdTaS.js → tensor4d-vuDDgdUI.js} +1 -1
- package/dist/{tfjs_backend-y1cvNhLA.js → tfjs_backend-806hyYve.js} +49 -49
- package/dist/{tile-mbfagpsB.js → tile-OWUvpIVt.js} +3 -3
- package/dist/tokeniser/BaseTokeniser.d.ts +25 -0
- package/dist/tokeniser/BaseTokeniser.js +94 -0
- package/dist/tokeniser/CharTokeniser.d.ts +10 -9
- package/dist/tokeniser/CharTokeniser.js +44 -30
- package/dist/tokeniser/bpe.d.ts +10 -9
- package/dist/tokeniser/bpe.js +67 -52
- package/dist/tokeniser/type.d.ts +14 -5
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.d.ts +3 -3
- package/dist/training/DatasetBuilder.js +34 -38
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.d.ts +4 -3
- package/dist/training/Trainer.js +22 -25
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/training/tasks/ConversationTask.d.ts +11 -0
- package/dist/training/tasks/ConversationTask.js +26 -0
- package/dist/training/tasks/PretrainingTask.d.ts +11 -0
- package/dist/training/tasks/PretrainingTask.js +34 -0
- package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
- package/dist/training/tasks/StartSentenceTask.js +42 -0
- package/dist/training/tasks/Task.d.ts +8 -0
- package/dist/training/tasks/Task.js +41 -0
- package/dist/{transpose-ClWiBS_b.js → transpose-BUkQCJp9.js} +6 -6
- package/dist/{unsorted_segment_sum-BDDhB_E6.js → unsorted_segment_sum-BljxHhCY.js} +5 -5
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.js +1 -1
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +1 -1
- package/dist/utilities/sentences.js +11 -11
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-WawDEaAb.js → variable-DPt_Iuog.js} +1 -1
- package/dist/{webgpu_program-DuOXPQol.js → webgpu_program-BpWRlghH.js} +3 -3
- package/dist/{webgpu_util-RxEF33Rj.js → webgpu_util-DMiKzzQM.js} +7 -7
- package/dist/{zeros-KnWaWf-X.js → zeros-5YROwwUH.js} +2 -2
- package/dist/{zeros_like-DvE73F4e.js → zeros_like-De4n1C3m.js} +71 -71
- package/package.json +1 -1
- package/dist/complex-DjxcVmoX.js +0 -11
- package/dist/concat-BV8bt5H-.js +0 -17
- package/dist/expand_dims-DT4tEPwA.js +0 -11
- package/dist/log_sum_exp-ngO0-4pK.js +0 -39
- package/dist/mat_mul-SjpJRLyL.js +0 -11
- package/dist/range-BklejeeW.js +0 -10
- package/dist/relu-CP0ZcxWO.js +0 -9
- package/dist/split-CVLc0w--.js +0 -9
- package/dist/squeeze-C7Z2srUo.js +0 -10
- package/dist/tensor-DJoc7gJU.js +0 -8
- package/dist/tensor1d-D11P_7Dp.js +0 -11
@@ -1,10 +1,10 @@
-import {
-import { f as it, g as
+import { x as Lt, bR as Ce, U as V, _ as Pt, a8 as K, ae as et, ac as ht, bS as dt, bT as at, a7 as De, Q as st, aQ as Ue, ar as We, bU as $e, bV as ze, bW as Be, bX as qt, at as H, bY as At, bZ as U, b_ as _t, bC as Vt, an as Ct, b$ as Dt, F as Ut, c0 as Wt, b1 as $t, b2 as zt, b3 as Bt, b4 as jt, ao as Gt, c1 as Zt, b8 as Ht, c2 as Kt, aq as je, aR as Ge, b9 as Xt, as as Ze, c3 as He, af as Qt, ak as Ke, t as Xe, bM as Yt, c4 as It, aH as Jt, S as Qe, c5 as te, bb as ee, c6 as ne, aB as pt, c7 as se, c8 as ot } from "./index-Duu1Lvvv.js";
+import { f as it, g as Ye, a as Je, R as tn, v as en, d as nn, e as sn, h as on, i as rn, j as an, k as ln, l as cn, m as un, n as hn, o as fn, p as kt, q as dn, r as gn, s as mn } from "./backend_util-DC3rBo_H.js";
 import { m as Nt } from "./complex_util-Yc1A_gV1.js";
-import { a as pn, b as wn, e as In, c as bn } from "./axis_util-
-import { b as xn } from "./broadcast_to-
-import { r as En } from "./reshape-
-import { p as Fn, a as yn, i as kn, c as Nn } from "./slice_util-
+import { a as pn, b as wn, e as In, c as bn } from "./axis_util-DGqbT-FX.js";
+import { b as xn } from "./broadcast_to-D1Dmg2Oz.js";
+import { r as En } from "./reshape-BI0yzp1T.js";
+import { p as Fn, a as yn, i as kn, c as Nn } from "./slice_util-zN8KFC5I.js";
 import { g as vn } from "./_commonjsHelpers-ByX85dGu.js";
 function Sn(e, t) {
 for (var n = 0; n < t.length; n++) {
@@ -646,7 +646,7 @@ const tt = (
 function gt(e) {
 return tt.fromString(e, !0, 16);
 }
-const ae = gt("c3a5c85c97cb3127"),
+const ae = gt("c3a5c85c97cb3127"), J = gt("b492b66fbe98f273"), D = gt("9ae16a3b2f90404f");
 function bt(e) {
 return e.xor(e.shru(47));
 }
@@ -663,7 +663,7 @@ function St(e, t) {
 function A(e, t) {
 return t === 0 ? e : e.shru(t).or(e.shl(64 - t));
 }
-function
+function Q(e, t, n = gt("9ddfea08eb382d69")) {
 let s = e.xor(t).mul(n);
 s = s.xor(s.shru(47));
 let o = t.xor(s).mul(n);
@@ -680,11 +680,11 @@ function ut(e, t, n, s) {
 function On(e, t = e.length) {
 if (t >= 8) {
 const n = D.add(t * 2), s = T(e, 0).add(D), o = T(e, t - 8), a = A(o, 37).mul(n).add(s), r = A(s, 25).add(o).mul(n);
-return
+return Q(a, r, n);
 }
 if (t >= 4) {
 const n = D.add(t * 2), s = St(e, 0);
-return
+return Q(s.shl(3).add(t), St(e, t - 4), n);
 }
 if (t > 0) {
 const n = e[0], s = e[t >> 1], o = e[t - 1], a = n + (s << 8), r = t + (o << 2);
@@ -693,12 +693,12 @@ function On(e, t = e.length) {
 return D;
 }
 function Ln(e, t = e.length) {
-const n = D.add(t * 2), s = T(e, 0).mul(
-return
+const n = D.add(t * 2), s = T(e, 0).mul(J), o = T(e, 8), a = T(e, t - 8).mul(n), r = T(e, t - 16).mul(D);
+return Q(A(s.add(o), 43).add(A(a, 30)).add(r), s.add(A(o.add(D), 18)).add(a), n);
 }
 function Pn(e, t = e.length) {
-const n = D.add(t * 2), s = T(e, 0).mul(D), o = T(e, 8), a = T(e, t - 8).mul(n), r = T(e, t - 16).mul(D), i = A(s.add(o), 43).add(A(a, 30)).add(r), l =
-return
+const n = D.add(t * 2), s = T(e, 0).mul(D), o = T(e, 8), a = T(e, t - 8).mul(n), r = T(e, t - 16).mul(D), i = A(s.add(o), 43).add(A(a, 30)).add(r), l = Q(i, s.add(A(o.add(D), 18)).add(a), n), u = T(e, 16).mul(n), h = T(e, 24), f = i.add(T(e, t - 32)).mul(n), p = l.add(T(e, t - 24)).mul(n);
+return Q(A(u.add(h), 43).add(A(f, 30)).add(p), u.add(A(h.add(s), 18)).add(f), n);
 }
 function qn(e, t = e.length) {
 const n = tt.fromNumber(81, !0);
@@ -706,15 +706,15 @@ function qn(e, t = e.length) {
 return t <= 16 ? On(e, t) : Ln(e, t);
 if (t <= 64)
 return Pn(e, t);
-let s = n, o = n.mul(
+let s = n, o = n.mul(J).add(113), a = bt(o.mul(D).add(113)).mul(D), r = [tt.UZERO, tt.UZERO], i = [tt.UZERO, tt.UZERO];
 s = s.mul(D).add(T(e, 0));
 let l = 0;
 const u = (t - 1 >> 6) * 64, h = u + (t - 1 & 63) - 63;
 do
-s = A(s.add(o).add(r[0]).add(T(e, l + 8)), 37).mul(
+s = A(s.add(o).add(r[0]).add(T(e, l + 8)), 37).mul(J), o = A(o.add(r[1]).add(T(e, l + 48)), 42).mul(J), s = s.xor(i[1]), o = o.add(r[0]).add(T(e, l + 40)), a = A(a.add(i[0]), 33).mul(J), r = ut(e, l, r[1].mul(J), s.add(i[0])), i = ut(e, l + 32, a.add(i[1]), o.add(T(e, l + 16))), [a, s] = [s, a], l += 64;
 while (l !== u);
-const f =
-return l = h, i[0] = i[0].add(t - 1 & 63), r[0] = r[0].add(i[0]), i[0] = i[0].add(r[0]), s = A(s.add(o).add(r[0]).add(T(e, l + 8)), 37).mul(f), o = A(o.add(r[1]).add(T(e, l + 48)), 42).mul(f), s = s.xor(i[1].mul(9)), o = o.add(r[0].mul(9).add(T(e, l + 40))), a = A(a.add(i[0]), 33).mul(f), r = ut(e, l, r[1].mul(f), s.add(i[0])), i = ut(e, l + 32, a.add(i[1]), o.add(T(e, l + 16))), [a, s] = [s, a],
+const f = J.add(a.and(255).shl(1));
+return l = h, i[0] = i[0].add(t - 1 & 63), r[0] = r[0].add(i[0]), i[0] = i[0].add(r[0]), s = A(s.add(o).add(r[0]).add(T(e, l + 8)), 37).mul(f), o = A(o.add(r[1]).add(T(e, l + 48)), 42).mul(f), s = s.xor(i[1].mul(9)), o = o.add(r[0].mul(9).add(T(e, l + 40))), a = A(a.add(i[0]), 33).mul(f), r = ut(e, l, r[1].mul(f), s.add(i[0])), i = ut(e, l + 32, a.add(i[1]), o.add(T(e, l + 16))), [a, s] = [s, a], Q(Q(r[0], i[0], f).add(bt(o).mul(ae)).add(a), Q(r[1], i[1], f).add(s), f);
 }
 function nt(e, t) {
 Array.isArray(e) || (e = [e]), e.forEach((n) => {
@@ -733,12 +733,12 @@ const An = (e) => {
 let s = new Float32Array(V(t.shape));
 const o = n.data.get(t.dataId).values;
 return s = le(o), n.makeOutput(s, t.shape, t.dtype);
-},
+}, Qs = {
 kernelName: Ce,
 backendName: "cpu",
 kernelFunc: An
 };
-function
+function W(e) {
 return (t, n, s, o, a) => {
 const r = Pt(t, n), i = r.length, l = K(r), u = V(r), h = et(a, u), f = t.length, p = n.length, w = K(t), m = K(n), F = ht(t, r), d = ht(n, r);
 if (F.length + d.length === 0)
@@ -763,7 +763,7 @@ function mt(e) {
 imag: n.makeTensorInfo(o.shape, "float32", r)
 }, i;
 }
-const
+const Ys = {
 kernelName: De,
 backendName: "cpu",
 kernelFunc: mt
@@ -780,8 +780,8 @@ function Et(e) {
 const { inputs: t, backend: n } = e, { x: s } = t;
 return n.incRef(s.dataId), { dataId: s.dataId, shape: s.shape, dtype: s.dtype };
 }
-const
-kernelName:
+const Js = {
+kernelName: Ue,
 backendName: "cpu",
 kernelFunc: Et
 };
@@ -790,7 +790,7 @@ function ce(e) {
 return n.makeTensorInfo(o.shape, o.dtype, a);
 }
 const to = {
-kernelName:
+kernelName: We,
 backendName: "cpu",
 kernelFunc: ce
 };
@@ -800,7 +800,7 @@ function ue(e, t, n, s) {
 return [t, "int32", o];
 }
 if (s === "bool") {
-const o = Be([0], n), [a, r] =
+const o = Be([0], n), [a, r] = W((i, l) => i !== l ? 1 : 0)(t, [], e, o, "bool");
 return [r, "bool", a];
 }
 throw new Error(`Error in Cast: failed to cast ${n} to ${s}`);
@@ -872,7 +872,7 @@ function Ft(e) {
 return [f, p, i];
 };
 }
-const he =
+const he = W(((e, t) => e + t)), _n = Ft(((e, t, n, s) => ({ real: e + n, imag: t + s }))), Vn = z(qt, he, _n), no = {
 kernelName: qt,
 backendName: "cpu",
 kernelFunc: Vn
@@ -898,14 +898,14 @@ function Dn(e, t, n, s = !1) {
 }
 return r;
 }
-const fe =
+const fe = W(((e, t) => e & t)), Un = z(At, fe), so = {
 kernelName: At,
 backendName: "cpu",
-kernelFunc:
+kernelFunc: Un
 };
 function X(e) {
 return (t, n, s) => {
-const o =
+const o = U(n, t.length);
 for (let a = 0; a < t.length; ++a)
 o[a] = e(t[a], s);
 return o;
@@ -913,9 +913,9 @@ function X(e) {
 }
 function de(e, t, n) {
 const s = X(t);
-return
+return Y(e, s, n);
 }
-function
+function Y(e, t, n) {
 return ({ inputs: s, attrs: o, backend: a }) => {
 const { x: r } = s;
 nt(r, e);
@@ -931,13 +931,13 @@ function Q(e, t, n) {
 return i.makeTensorInfo(r.shape, h, f);
 };
 }
-const ge = X((e) => Math.ceil(e)),
+const ge = X((e) => Math.ceil(e)), Wn = Y(_t, ge), oo = {
 kernelName: _t,
 backendName: "cpu",
-kernelFunc:
+kernelFunc: Wn
 };
 function $n(e, t, n, s) {
-const o =
+const o = U(n, V(t));
 if (s && n !== "string") {
 let a = 0;
 e.forEach((r) => {
@@ -959,28 +959,28 @@ function $n(e, t, n, s) {
 }
 return o;
 }
-const me =
+const me = W((e, t) => e === t ? 1 : 0), zn = z(Vt, me, null, "bool"), ro = {
 kernelName: Vt,
 backendName: "cpu",
 kernelFunc: zn
 };
-const pe = X((e) => Math.exp(e)), Bn =
+const pe = X((e) => Math.exp(e)), Bn = Y(Ct, pe, "float32"), ao = {
 kernelName: Ct,
 backendName: "cpu",
 kernelFunc: Bn
 };
-const we = X((e) => Math.expm1(e)), jn =
+const we = X((e) => Math.expm1(e)), jn = Y(Dt, we), io = {
 kernelName: Dt,
 backendName: "cpu",
 kernelFunc: jn
 };
-const Ie = X((e) => Math.floor(e)), Gn =
-kernelName:
+const Ie = X((e) => Math.floor(e)), Gn = Y(Ut, Ie), lo = {
+kernelName: Ut,
 backendName: "cpu",
 kernelFunc: Gn
 };
-const be =
-kernelName:
+const be = W((e, t) => Math.floor(e / t)), Zn = z(Wt, be, null, "int32"), co = {
+kernelName: Wt,
 backendName: "cpu",
 kernelFunc: Zn
 };
@@ -1010,25 +1010,25 @@ function Kn(e, t, n) {
 }
 return s;
 }
-const xe =
+const xe = W((e, t) => e > t ? 1 : 0), Xn = z($t, xe, null, "bool"), uo = {
 kernelName: $t,
 backendName: "cpu",
 kernelFunc: Xn
 };
-const Ee =
+const Ee = W((e, t) => e >= t ? 1 : 0), Qn = z(zt, Ee, null, "bool"), ho = {
 kernelName: zt,
 backendName: "cpu",
-kernelFunc:
+kernelFunc: Qn
 };
-const Fe =
+const Fe = W((e, t) => e < t ? 1 : 0), Yn = z(Bt, Fe, null, "bool"), fo = {
 kernelName: Bt,
 backendName: "cpu",
-kernelFunc:
+kernelFunc: Yn
 };
-const ye =
+const ye = W((e, t) => e <= t ? 1 : 0), Jn = z(jt, ye, null, "bool"), go = {
 kernelName: jt,
 backendName: "cpu",
-kernelFunc:
+kernelFunc: Jn
 };
 function ts(e, t, n) {
 const s = (t - e) / (n - 1), o = st(n, "float32");
@@ -1037,7 +1037,7 @@ function ts(e, t, n) {
 o[a] = o[a - 1] + s;
 return o;
 }
-const ke = X((e) => Math.log(e)), es =
+const ke = X((e) => Math.log(e)), es = Y(Gt, ke), mo = {
 kernelName: Gt,
 backendName: "cpu",
 kernelFunc: es
@@ -1055,17 +1055,17 @@ function ns(e, t, n, s) {
 }
 return o;
 }
-const Ne =
+const Ne = W(((e, t) => Math.max(e, t))), ss = z(Zt, Ne), po = {
 kernelName: Zt,
 backendName: "cpu",
 kernelFunc: ss
 };
-const ve =
+const ve = W(((e, t) => Math.min(e, t))), os = z(Ht, ve), wo = {
 kernelName: Ht,
 backendName: "cpu",
 kernelFunc: os
 };
-const yt =
+const yt = W(((e, t) => e * t)), rs = Ft(((e, t, n, s) => ({
 real: e * n - t * s,
 imag: e * s + t * n
 }))), as = z(Kt, yt, rs), Io = {
@@ -1088,7 +1088,7 @@ const bo = {
 backendName: "cpu",
 kernelFunc: is
 };
-const Te =
+const Te = W(((e, t) => e !== t ? 1 : 0)), ls = z(Xt, Te, null, "bool"), xo = {
 kernelName: Xt,
 backendName: "cpu",
 kernelFunc: ls
@@ -1132,7 +1132,7 @@ function Oe(e, t, n, s) {
 function cs(e) {
 const { inputs: t, backend: n, attrs: s } = e, { x: o } = t, { axis: a, keepDims: r } = s;
 nt(o, "prod");
-const i = o.shape.length, l =
+const i = o.shape.length, l = Qt(a, o.shape), u = pn(l, i);
 let h = l, f = o;
 const p = [];
 u != null && (f = Me({ inputs: { x: o }, backend: n, attrs: { perm: u } }), p.push(f), h = wn(h.length, i));
@@ -1197,7 +1197,7 @@ function fs(e, t, n, s) {
 function ds(e) {
 const t = [];
 for (let n = 0; n < e.length; ++n) {
-const s = e[n].length, o =
+const s = e[n].length, o = U("int32", s);
 t.push(o), e[n].forEach((a, r) => o[r] = a);
 }
 return t;
@@ -1223,7 +1223,7 @@ function gs(e, t, n, s, o, a) {
 function ms(e, t, n, s, o) {
 const a = t.slice();
 a[0] = o;
-const r =
+const r = U(n, V(a)), i = e.length, l = i === 0 ? 0 : i / t[0];
 return gs(e, t, s, l, r, a), [r, a];
 }
 function ps(e, t, n, s, o, a, r, i) {
@@ -1250,7 +1250,7 @@ function ws(e, t, n, s, o, a, r) {
 for (let d = 1; d < h.length; ++d)
 if (h[d] !== h[d - 1])
 throw new Error("starts, limits, and deltas must have the same shape");
-const f = h.length === 0 ? 1 : h[0], p =
+const f = h.length === 0 ? 1 : h[0], p = U("int32", f + 1);
 p[0] = 0;
 for (let d = 0; d < f; ++d) {
 const g = i ? e[0] : e[d], b = l ? s[0] : s[d], k = u ? a[0] : a[d];
@@ -1263,7 +1263,7 @@ function ws(e, t, n, s, o, a, r) {
 throw new Error(`Requires ((limit - start) / delta) <= ${Rt}`);
 p[d + 1] = p[d] + y;
 }
-const w = p[f], m =
+const w = p[f], m = U(n, w);
 let F = 0;
 for (let d = 0; d < f; ++d) {
 const g = p[d + 1] - p[d];
@@ -1277,7 +1277,7 @@ function ws(e, t, n, s, o, a, r) {
 var Z = tn;
 class ft {
 constructor(t, n, s, o, a, r, i, l, u, h) {
-this.shape = t, this.shapeShape = n, this.values = s, this.valuesShape = o, this.valuesDType = a, this.defaultValue = r, this.defaultValueShape = i, this.rowPartitionValues = l, this.rowPartitionValuesShapes = u, this.rowPartitionTypes =
+this.shape = t, this.shapeShape = n, this.values = s, this.valuesShape = o, this.valuesDType = a, this.defaultValue = r, this.defaultValueShape = i, this.rowPartitionValues = l, this.rowPartitionValuesShapes = u, this.rowPartitionTypes = Ye(h), this.raggedRank = Je(this.rowPartitionTypes);
 }
 getRowPartitionTypeByDimension(t) {
 return this.rowPartitionTypes[0] === Z.FIRST_DIM_SIZE ? this.rowPartitionTypes[t + 1] : this.rowPartitionTypes[t];
@@ -1451,7 +1451,7 @@ class ft {
 o[o.length - 1] = 1;
 for (let l = o.length - 2; l >= 0; --l)
 o[l] = o[l + 1] * s[l + 1];
-const a = Ot(s, !1), r =
+const a = Ot(s, !1), r = U(this.valuesDType, V(a));
 if (o[0] * s[0] > 0) {
 let l = this.calculateFirstParentOutputIndex(n, o[0], s[0]);
 for (let u = 1; u <= this.raggedRank; ++u)
@@ -1533,8 +1533,8 @@ function bs(e, t, n, s) {
 l[u] = l[u - 1] + n;
 return l;
 }
-const Le = X((e) => 1 / Math.sqrt(e)), xs =
-kernelName:
+const Le = X((e) => 1 / Math.sqrt(e)), xs = Y(Yt, Le), yo = {
+kernelName: Yt,
 backendName: "cpu",
 kernelFunc: xs
 };
@@ -1558,8 +1558,8 @@ function Es(e, t, n, s, o, a, r, i, l, u) {
 }
 return w;
 }
-const Fs = X((e) => 1 / (1 + Math.exp(-e))), ys = de(
-kernelName:
+const Fs = X((e) => 1 / (1 + Math.exp(-e))), ys = de(Jt, (e) => 1 / (1 + Math.exp(-e))), ko = {
+kernelName: Jt,
 backendName: "cpu",
 kernelFunc: ys
 };
@@ -1585,7 +1585,7 @@ function ks(e) {
 return n.makeTensorInfo(l, o.dtype, h);
 }
 const No = {
-kernelName:
+kernelName: Qe,
 backendName: "cpu",
 kernelFunc: ks
 };
@@ -1594,7 +1594,7 @@ function Ns(e, t, n, s, o, a, r) {
 if (l === 0) {
 if (i !== 0)
 throw new Error(on(i));
-const d =
+const d = U(n, 0), g = U(o, 0);
 return [
 d,
 [0, f],
@@ -1630,7 +1630,7 @@ function Ns(e, t, n, s, o, a, r) {
 h
 ];
 } else {
-const d = m[l - 1], g =
+const d = m[l - 1], g = U(n, d * f), b = U(o, d), k = new Array(l).fill(0);
 for (let y = 0; y < i; ++y) {
 const v = e[y * f], M = k[v], O = (v === 0 ? 0 : m[v - 1]) + M;
 k[v]++;
@@ -1692,7 +1692,7 @@ function vs(e, t, n, s, o) {
 for (let d = i - 2; d >= 0; --d)
 m[d] = m[d + 1] * l[d + 1];
 }
-const F =
+const F = U(n, r * i);
 for (let d = 0; d < r; ++d) {
 let g = 0;
 for (let b = 0; b < p; ++b)
@@ -1708,7 +1708,7 @@ function Ss(e, t, n, s, o, a = !1, r = 0) {
 throw new Error(kt());
 const p = t.slice();
 p[0] = f;
-const w = p.reduce((k, y) => k * y, 1), m =
+const w = p.reduce((k, y) => k * y, 1), m = U(n, w);
 if (i === 0)
 return f > 0 && m.fill(r), [m, p];
 if (f <= 0)
@@ -1747,7 +1747,7 @@ const Ts = X((e) => Math.sqrt(e)), Rs = de(te, (e) => Math.sqrt(e)), vo = {
 backendName: "cpu",
 kernelFunc: Rs
 };
-const qe =
+const qe = W(((e, t) => {
 const n = e - t;
 return n * n;
 })), Ms = z(ee, qe), So = {
@@ -1758,7 +1758,7 @@ const qe = U(((e, t) => {
 const Ae = X((e, t) => {
 const { pattern: n, replaceGlobal: s, rewrite: o } = t;
 return e.replace(new RegExp(n, s ? "g" : ""), o);
-}), Os =
+}), Os = Y(ne, Ae), To = {
 kernelName: ne,
 backendName: "cpu",
 kernelFunc: Os
@@ -1830,7 +1830,7 @@ class Ps {
 if (l !== s)
 throw new Error(`Last split value must be data size. Expected ${s}, got ${l}`);
 }
-const a = o - 1, r =
+const a = o - 1, r = U("int32", o);
 if (s === 0 || o === 0) {
 const l = new Array(s);
 for (let u = 0; u <= a; ++u)
@@ -1901,7 +1901,7 @@ function _s(e, t, n) {
 const m = o.length - w;
 i[p] = m, a += m, r = Math.max(r, m);
 }
-const l =
+const l = U("int32", a * 2), u = new Array(a), h = [s, r];
 let f = 0;
 for (let p = 0; p < s; ++p)
 for (let w = 0; w < i[p]; ++w)
@@ -1909,17 +1909,17 @@ function _s(e, t, n) {
 return [l, u, h];
 }
 function Vs(e, t) {
-const n =
+const n = U("int32", e.length);
 for (let s = 0; s < e.length; ++s)
 n[s] = qn(e[s]).modulo(t).getLowBitsUnsigned();
 return n;
 }
-const _e =
+const _e = W(((e, t) => e - t)), Cs = Ft(((e, t, n, s) => ({ real: e - n, imag: t - s }))), Ds = z(se, _e, Cs), Ro = {
 kernelName: se,
 backendName: "cpu",
 kernelFunc: Ds
 };
-function
+function Us(e, t) {
 const n = new Array(e.rank);
 for (let o = 0; o < n.length; o++)
 n[o] = e.shape[o] * t[o];
@@ -1954,7 +1954,7 @@ function Ve(e, t, n = 0, s = e.length - 1) {
 rt(e[n], o) === 0 ? ot(e, n, r) : (r = r + 1, ot(e, r, s)), r <= t && (n = r + 1), t <= r && (s = r - 1);
 }
 }
-function
+function Ws(e, t, n, s, o) {
 const a = t[t.length - 1], [r, i] = [e.length / a, a], l = et(n, r * s), u = et("int32", r * s);
 for (let f = 0; f < r; f++) {
 const p = f * i, w = e.subarray(p, p + i);
@@ -1971,7 +1971,7 @@ function Us(e, t, n, s, o) {
 ];
 }
 function $s(e, t, n, s) {
-const o =
+const o = Qt(t, n)[0], a = [1, n[0], 1];
 for (let m = 0; m < o; m++)
 a[0] *= n[m];
 a[1] = n[o];
@@ -2061,8 +2061,8 @@ const Mo = /* @__PURE__ */ Object.freeze(/* @__PURE__ */ Object.defineProperty({
 stringSplitImpl: _s,
 stringToHashBucketFastImpl: Vs,
 subImpl: _e,
-tileImpl:
-topKImpl:
+tileImpl: Us,
+topKImpl: Ws,
 transposeImpl: Re,
 uniqueImpl: $s
 }, Symbol.toStringTag, { value: "Module" }));
@@ -2079,16 +2079,16 @@ export {
 qs as I,
 _s as J,
 Vs as K,
-
-
+Us as L,
+Ws as M,
 $s as N,
 zn as O,
-
+Qs as P,
 no as Q,
 so as R,
 eo as S,
 oo as T,
-
+Ys as U,
 ro as V,
 ao as W,
 io as X,
@@ -2096,7 +2096,7 @@ export {
 co as Z,
 uo as _,
 nt as a,
-
+Js as a0,
 fo as a1,
 go as a2,
 mo as a3,
@@ -2116,7 +2116,7 @@ export {
 Ro as ah,
 Eo as ai,
 ys as b,
-
+W as c,
 Vn as d,
 z as e,
 ks as f,
@@ -0,0 +1,9 @@
+import { o as p, q as i, E as a, J as c } from "./index-Duu1Lvvv.js";
+function e(t, s, o = 0) {
+const n = { x: i(t, "x", "split") }, r = { numOrSizeSplits: s, axis: o };
+return a.runKernel(c, n, r);
+}
+const u = /* @__PURE__ */ p({ split_: e });
+export {
+u as s
+};
@@ -0,0 +1,10 @@
+import { o as r, q as n, K as a } from "./index-Duu1Lvvv.js";
+import { r as t } from "./reshape-BI0yzp1T.js";
+function p(s, o) {
+const e = n(s, "x", "squeeze", "string_or_numeric");
+return t(e, a(e.shape, o).newShape);
+}
+const i = /* @__PURE__ */ r({ squeeze_: p });
+export {
+i as s
+};
@@ -1,4 +1,4 @@
-import {
+import { o as e, y as c, x as n, E as k, P as i } from "./index-Duu1Lvvv.js";
 function u(r, t = 0) {
 const s = c(r, "tensors", "stack", "string_or_numeric");
 n(s.length >= 1, () => "Pass at least one tensor to tf.stack"), s.length > 0 && n(t <= s[0].rank, () => "Axis must be <= rank of the tensor");